1 /*
2  * Hunt - A refined core library for D programming language.
3  *
4  * Copyright (C) 2018-2019 HuntLabs
5  *
6  * Website: https://www.huntlabs.net/
7  *
8  * Licensed under the Apache-2.0 License.
9  *
10  */
11 
12 module hunt.concurrency.ThreadPoolExecutor;
13 
14 import hunt.concurrency.AbstractExecutorService;
15 import hunt.concurrency.AbstractOwnableSynchronizer;
16 import hunt.concurrency.AbstractQueuedSynchronizer;
17 import hunt.concurrency.atomic.AtomicHelper;
18 import hunt.concurrency.BlockingQueue;
19 import hunt.concurrency.Exceptions;
20 import hunt.concurrency.ExecutorService;
21 import hunt.concurrency.Future;
22 import hunt.concurrency.thread.ThreadEx;
23 import hunt.concurrency.ThreadFactory;
24 
25 import hunt.collection;
26 import hunt.Exceptions;
27 import hunt.Functions;
28 import hunt.Integer;
29 import hunt.util.DateTime;
30 import hunt.util.Common;
31 
32 import core.sync.mutex;
33 import core.sync.condition;
34 import core.thread;
35 import std.algorithm;
36 import std.conv;
37 
38 import hunt.logging.ConsoleLogger;
39 
47 /**
48  * An {@link ExecutorService} that executes each submitted task using
49  * one of possibly several pooled threads, normally configured
50  * using {@link Executors} factory methods.
51  *
52  * <p>Thread pools address two different problems: they usually
53  * provide improved performance when executing large numbers of
54  * asynchronous tasks, due to reduced per-task invocation overhead,
55  * and they provide a means of bounding and managing the resources,
56  * including threads, consumed when executing a collection of tasks.
57  * Each {@code ThreadPoolExecutor} also maintains some basic
58  * statistics, such as the number of completed tasks.
59  *
60  * <p>To be useful across a wide range of contexts, this class
61  * provides many adjustable parameters and extensibility
62  * hooks. However, programmers are urged to use the more convenient
63  * {@link Executors} factory methods {@link
64  * Executors#newCachedThreadPool} (unbounded thread pool, with
65  * automatic thread reclamation), {@link Executors#newFixedThreadPool}
66  * (fixed size thread pool) and {@link
67  * Executors#newSingleThreadExecutor} (single background thread), that
68  * preconfigure settings for the most common usage
69  * scenarios. Otherwise, use the following guide when manually
70  * configuring and tuning this class:
71  *
72  * <dl>
73  *
74  * <dt>Core and maximum pool sizes</dt>
75  *
76  * <dd>A {@code ThreadPoolExecutor} will automatically adjust the
77  * pool size (see {@link #getPoolSize})
78  * according to the bounds set by
79  * corePoolSize (see {@link #getCorePoolSize}) and
80  * maximumPoolSize (see {@link #getMaximumPoolSize}).
81  *
82  * When a new task is submitted in method {@link #execute(Runnable)},
83  * if fewer than corePoolSize threads are running, a new thread is
84  * created to handle the request, even if other worker threads are
85  * idle.  Else if fewer than maximumPoolSize threads are running, a
86  * new thread will be created to handle the request only if the queue
87  * is full.  By setting corePoolSize and maximumPoolSize the same, you
88  * create a fixed-size thread pool. By setting maximumPoolSize to an
89  * essentially unbounded value such as {@code Integer.MAX_VALUE}, you
90  * allow the pool to accommodate an arbitrary number of concurrent
91  * tasks. Most typically, core and maximum pool sizes are set only
92  * upon construction, but they may also be changed dynamically using
93  * {@link #setCorePoolSize} and {@link #setMaximumPoolSize}. </dd>
94  *
95  * <dt>On-demand construction</dt>
96  *
97  * <dd>By default, even core threads are initially created and
98  * started only when new tasks arrive, but this can be overridden
99  * dynamically using method {@link #prestartCoreThread} or {@link
100  * #prestartAllCoreThreads}.  You probably want to prestart threads if
101  * you construct the pool with a non-empty queue. </dd>
102  *
103  * <dt>Creating new threads</dt>
104  *
 * <dd>New threads are created using a {@link ThreadFactory}.  If not
 * otherwise specified, {@link ThreadFactory#defaultThreadFactory} is
 * used, which creates threads that all belong to the same {@link
 * ThreadGroupEx} and have the same {@code NORM_PRIORITY} priority and
 * non-daemon status. By supplying a different ThreadFactory, you can
110  * alter the thread's name, thread group, priority, daemon status,
111  * etc. If a {@code ThreadFactory} fails to create a thread when asked
112  * by returning null from {@code newThread}, the executor will
113  * continue, but might not be able to execute any tasks. Threads
114  * should possess the "modifyThread" {@code RuntimePermission}. If
115  * worker threads or other threads using the pool do not possess this
116  * permission, service may be degraded: configuration changes may not
117  * take effect in a timely manner, and a shutdown pool may remain in a
118  * state in which termination is possible but not completed.</dd>
119  *
120  * <dt>Keep-alive times</dt>
121  *
 * <dd>If the pool currently has more than corePoolSize threads,
 * excess threads will be terminated if they have been idle for more
 * than the keepAliveTime (see {@link #getKeepAliveTime}).
 * This provides a means of reducing resource consumption when the
 * pool is not being actively used. If the pool becomes more active
 * later, new threads will be constructed. This parameter can also be
 * changed dynamically using method {@link #setKeepAliveTime}.  Using
 * a very large keep-alive {@code Duration} effectively disables idle
 * threads from ever terminating prior to shut down. By default, the
 * keep-alive policy applies only when there are more than
 * corePoolSize threads, but method {@link #allowCoreThreadTimeOut(bool)}
 * can be used to apply this time-out policy to core threads as well,
 * so long as the keepAliveTime value is non-zero. </dd>
136  *
137  * <dt>Queuing</dt>
138  *
139  * <dd>Any {@link BlockingQueue} may be used to transfer and hold
140  * submitted tasks.  The use of this queue interacts with pool sizing:
141  *
142  * <ul>
143  *
144  * <li>If fewer than corePoolSize threads are running, the Executor
145  * always prefers adding a new thread
146  * rather than queuing.
147  *
148  * <li>If corePoolSize or more threads are running, the Executor
149  * always prefers queuing a request rather than adding a new
150  * thread.
151  *
152  * <li>If a request cannot be queued, a new thread is created unless
153  * this would exceed maximumPoolSize, in which case, the task will be
154  * rejected.
155  *
156  * </ul>
157  *
158  * There are three general strategies for queuing:
159  * <ol>
160  *
161  * <li><em> Direct handoffs.</em> A good default choice for a work
162  * queue is a {@link SynchronousQueue} that hands off tasks to threads
163  * without otherwise holding them. Here, an attempt to queue a task
164  * will fail if no threads are immediately available to run it, so a
165  * new thread will be constructed. This policy avoids lockups when
166  * handling sets of requests that might have internal dependencies.
167  * Direct handoffs generally require unbounded maximumPoolSizes to
 * avoid rejection of newly submitted tasks. This in turn admits the
169  * possibility of unbounded thread growth when commands continue to
170  * arrive on average faster than they can be processed.
171  *
172  * <li><em> Unbounded queues.</em> Using an unbounded queue (for
173  * example a {@link LinkedBlockingQueue} without a predefined
174  * capacity) will cause new tasks to wait in the queue when all
175  * corePoolSize threads are busy. Thus, no more than corePoolSize
176  * threads will ever be created. (And the value of the maximumPoolSize
177  * therefore doesn't have any effect.)  This may be appropriate when
178  * each task is completely independent of others, so tasks cannot
 * affect each other's execution; for example, in a web page server.
180  * While this style of queuing can be useful in smoothing out
181  * bursts of requests, it admits the possibility of
182  * unbounded work queue growth when commands continue to arrive on
183  * average faster than they can be processed.
184  *
185  * <li><em>Bounded queues.</em> A bounded queue (for example, an
186  * {@link ArrayBlockingQueue}) helps prevent resource exhaustion when
187  * used with finite maximumPoolSizes, but can be more difficult to
188  * tune and control.  Queue sizes and maximum pool sizes may be traded
189  * off for each other: Using large queues and small pools minimizes
190  * CPU usage, OS resources, and context-switching overhead, but can
191  * lead to artificially low throughput.  If tasks frequently block (for
192  * example if they are I/O bound), a system may be able to schedule
193  * time for more threads than you otherwise allow. Use of small queues
194  * generally requires larger pool sizes, which keeps CPUs busier but
195  * may encounter unacceptable scheduling overhead, which also
196  * decreases throughput.
197  *
198  * </ol>
199  *
200  * </dd>
201  *
202  * <dt>Rejected tasks</dt>
203  *
204  * <dd>New tasks submitted in method {@link #execute(Runnable)} will be
205  * <em>rejected</em> when the Executor has been shut down, and also when
206  * the Executor uses finite bounds for both maximum threads and work queue
207  * capacity, and is saturated.  In either case, the {@code execute} method
208  * invokes the {@link
209  * RejectedExecutionHandler#rejectedExecution(Runnable, ThreadPoolExecutor)}
210  * method of its {@link RejectedExecutionHandler}.  Four predefined handler
211  * policies are provided:
212  *
213  * <ol>
214  *
215  * <li>In the default {@link ThreadPoolExecutor.AbortPolicy}, the handler
216  * throws a runtime {@link RejectedExecutionException} upon rejection.
217  *
218  * <li>In {@link ThreadPoolExecutor.CallerRunsPolicy}, the thread
219  * that invokes {@code execute} itself runs the task. This provides a
220  * simple feedback control mechanism that will slow down the rate that
221  * new tasks are submitted.
222  *
223  * <li>In {@link ThreadPoolExecutor.DiscardPolicy}, a task that
224  * cannot be executed is simply dropped.
225  *
226  * <li>In {@link ThreadPoolExecutor.DiscardOldestPolicy}, if the
227  * executor is not shut down, the task at the head of the work queue
228  * is dropped, and then execution is retried (which can fail again,
 * causing this to be repeated).
230  *
231  * </ol>
232  *
233  * It is possible to define and use other kinds of {@link
234  * RejectedExecutionHandler} classes. Doing so requires some care
235  * especially when policies are designed to work only under particular
236  * capacity or queuing policies. </dd>
237  *
238  * <dt>Hook methods</dt>
239  *
240  * <dd>This class provides {@code protected} overridable
241  * {@link #beforeExecute(Thread, Runnable)} and
242  * {@link #afterExecute(Runnable, Throwable)} methods that are called
243  * before and after execution of each task.  These can be used to
244  * manipulate the execution environment; for example, reinitializing
245  * ThreadLocals, gathering statistics, or adding log entries.
246  * Additionally, method {@link #terminated} can be overridden to perform
247  * any special processing that needs to be done once the Executor has
248  * fully terminated.
249  *
250  * <p>If hook, callback, or BlockingQueue methods throw exceptions,
251  * internal worker threads may in turn fail, abruptly terminate, and
252  * possibly be replaced.</dd>
253  *
254  * <dt>Queue maintenance</dt>
255  *
256  * <dd>Method {@link #getQueue()} allows access to the work queue
257  * for purposes of monitoring and debugging.  Use of this method for
258  * any other purpose is strongly discouraged.  Two supplied methods,
259  * {@link #remove(Runnable)} and {@link #purge} are available to
260  * assist in storage reclamation when large numbers of queued tasks
261  * become cancelled.</dd>
262  *
263  * <dt>Reclamation</dt>
264  *
265  * <dd>A pool that is no longer referenced in a program <em>AND</em>
266  * has no remaining threads may be reclaimed (garbage collected)
267  * without being explicitly shutdown. You can configure a pool to
268  * allow all unused threads to eventually die by setting appropriate
269  * keep-alive times, using a lower bound of zero core threads and/or
270  * setting {@link #allowCoreThreadTimeOut(bool)}.  </dd>
271  *
272  * </dl>
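 *
 * <p><b>Usage example</b>. A minimal sketch of constructing and using a
 * pool directly. The queue type, the import module paths, and the task
 * body below are illustrative assumptions; any {@link BlockingQueue}
 * implementation can be used, and the rejection handler defaults to
 * {@link ThreadPoolExecutor.AbortPolicy}:
 *
 * <pre> {@code
 * import hunt.concurrency.LinkedBlockingQueue;
 * import hunt.util.Common;
 * import core.time : seconds;
 *
 * // 2 core threads, at most 4 threads, extra idle threads die after
 * // 30 seconds, and at most 64 queued tasks before tasks get rejected.
 * auto pool = new ThreadPoolExecutor(2, 4, 30.seconds,
 *         new LinkedBlockingQueue!(Runnable)(64));
 *
 * pool.execute(new class Runnable {
 *     void run() {
 *         // task body goes here
 *     }
 * });
 *
 * pool.shutdown(); // previously submitted tasks still run
 * }</pre>
 *
 * A custom {@link RejectedExecutionHandler} only needs to implement
 * {@code rejectedExecution(Runnable, ThreadPoolExecutor)}. A sketch of a
 * log-and-drop policy follows (the class name is illustrative; {@code
 * warning} comes from hunt.logging):
 *
 * <pre> {@code
 * class LogAndDiscardPolicy : RejectedExecutionHandler {
 *     void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
 *         warning("task rejected: pool saturated or shut down");
 *     }
 * }
 * }</pre>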
273  *
274  * <p><b>Extension example</b>. Most extensions of this class
275  * override one or more of the protected hook methods. For example,
276  * here is a subclass that adds a simple pause/resume feature:
277  *
278  * <pre> {@code
 * class PausableThreadPoolExecutor : ThreadPoolExecutor {
 *   private bool isPaused;
 *   private Mutex pauseLock;
 *   private Condition unpaused;
 *
 *   this(...) {
 *     super(...);
 *     pauseLock = new Mutex();
 *     unpaused = new Condition(pauseLock);
 *   }
 *
 *   protected override void beforeExecute(Thread t, Runnable r) {
 *     super.beforeExecute(t, r);
 *     pauseLock.lock();
 *     try {
 *       while (isPaused) unpaused.wait();
 *     } catch (InterruptedException ie) {
 *       ThreadEx tex = cast(ThreadEx) t;
 *       if (tex !is null) tex.interrupt();
 *     } finally {
 *       pauseLock.unlock();
 *     }
 *   }
 *
 *   void pause() {
 *     pauseLock.lock();
 *     try {
 *       isPaused = true;
 *     } finally {
 *       pauseLock.unlock();
 *     }
 *   }
 *
 *   void resume() {
 *     pauseLock.lock();
 *     try {
 *       isPaused = false;
 *       unpaused.notifyAll();
 *     } finally {
 *       pauseLock.unlock();
 *     }
 *   }
 * }}</pre>
317  *
318  * @author Doug Lea
319  */
320 class ThreadPoolExecutor : AbstractExecutorService {
321     /**
322      * The main pool control state, ctl, is an atomic integer packing
323      * two conceptual fields
324      *   workerCount, indicating the effective number of threads
325      *   runState,    indicating whether running, shutting down etc
326      *
327      * In order to pack them into one int, we limit workerCount to
328      * (2^29)-1 (about 500 million) threads rather than (2^31)-1 (2
329      * billion) otherwise representable. If this is ever an issue in
330      * the future, the variable can be changed to be an AtomicLong,
331      * and the shift/mask constants below adjusted. But until the need
332      * arises, this code is a bit faster and simpler using an int.
333      *
334      * The workerCount is the number of workers that have been
335      * permitted to start and not permitted to stop.  The value may be
336      * transiently different from the actual number of live threads,
337      * for example when a ThreadFactory fails to create a thread when
338      * asked, and when exiting threads are still performing
339      * bookkeeping before terminating. The user-visible pool size is
340      * reported as the current size of the workers set.
341      *
342      * The runState provides the main lifecycle control, taking on values:
343      *
344      *   RUNNING:  Accept new tasks and process queued tasks
345      *   SHUTDOWN: Don't accept new tasks, but process queued tasks
346      *   STOP:     Don't accept new tasks, don't process queued tasks,
347      *             and interrupt in-progress tasks
348      *   TIDYING:  All tasks have terminated, workerCount is zero,
349      *             the thread transitioning to state TIDYING
350      *             will run the terminated() hook method
351      *   TERMINATED: terminated() has completed
352      *
353      * The numerical order among these values matters, to allow
354      * ordered comparisons. The runState monotonically increases over
355      * time, but need not hit each state. The transitions are:
356      *
357      * RUNNING -> SHUTDOWN
358      *    On invocation of shutdown()
359      * (RUNNING or SHUTDOWN) -> STOP
360      *    On invocation of shutdownNow()
361      * SHUTDOWN -> TIDYING
362      *    When both queue and pool are empty
363      * STOP -> TIDYING
364      *    When pool is empty
365      * TIDYING -> TERMINATED
366      *    When the terminated() hook method has completed
367      *
368      * Threads waiting in awaitTermination() will return when the
369      * state reaches TERMINATED.
370      *
371      * Detecting the transition from SHUTDOWN to TIDYING is less
372      * straightforward than you'd like because the queue may become
373      * empty after non-empty and vice versa during SHUTDOWN state, but
374      * we can only terminate if, after seeing that it is empty, we see
375      * that workerCount is 0 (which sometimes entails a recheck -- see
376      * below).
377      */
    private shared(int) ctl; // initialized to ctlOf(RUNNING, 0) in initialize()
379     private enum int COUNT_BITS = Integer.SIZE - 3;
380     private enum int COUNT_MASK = (1 << COUNT_BITS) - 1;
381 
382     // runState is stored in the high-order bits
383     private enum int RUNNING    = -1 << COUNT_BITS;
384     private enum int SHUTDOWN   =  0 << COUNT_BITS;
385     private enum int STOP       =  1 << COUNT_BITS;
386     private enum int TIDYING    =  2 << COUNT_BITS;
387     private enum int TERMINATED =  3 << COUNT_BITS;
388 
389     /**
390      * The queue used for holding tasks and handing off to worker
391      * threads.  We do not require that workQueue.poll() returning
392      * null necessarily means that workQueue.isEmpty(), so rely
393      * solely on isEmpty to see if the queue is empty (which we must
394      * do for example when deciding whether to transition from
395      * SHUTDOWN to TIDYING).  This accommodates special-purpose
396      * queues such as DelayQueues for which poll() is allowed to
397      * return null even if it may later return non-null when delays
398      * expire.
399      */
400     private BlockingQueue!(Runnable) workQueue;
401 
402     /**
403      * Lock held on access to workers set and related bookkeeping.
404      * While we could use a concurrent set of some sort, it turns out
405      * to be generally preferable to use a lock. Among the reasons is
406      * that this serializes interruptIdleWorkers, which avoids
407      * unnecessary interrupt storms, especially during shutdown.
408      * Otherwise exiting threads would concurrently interrupt those
409      * that have not yet interrupted. It also simplifies some of the
410      * associated statistics bookkeeping of largestPoolSize etc. We
411      * also hold mainLock on shutdown and shutdownNow, for the sake of
412      * ensuring workers set is stable while separately checking
413      * permission to interrupt and actually interrupting.
414      */
415     private Mutex mainLock;
416 
417     /**
418      * Set containing all worker threads in pool. Accessed only when
419      * holding mainLock.
420      */
421     private HashSet!(Worker) workers;
422 
423     /**
424      * Wait condition to support awaitTermination.
425      */
426     private Condition termination;
427 
428     /**
429      * Tracks largest attained pool size. Accessed only under
430      * mainLock.
431      */
432     private int largestPoolSize;
433 
434     /**
435      * Counter for completed tasks. Updated only on termination of
436      * worker threads. Accessed only under mainLock.
437      */
438     private long completedTaskCount;
439 
440     /*
441      * All user control parameters are declared as volatiles so that
442      * ongoing actions are based on freshest values, but without need
443      * for locking, since no internal invariants depend on them
444      * changing synchronously with respect to other actions.
445      */
446 
447     /**
448      * Factory for new threads. All threads are created using this
449      * factory (via method addWorker).  All callers must be prepared
450      * for addWorker to fail, which may reflect a system or user's
451      * policy limiting the number of threads.  Even though it is not
452      * treated as an error, failure to create threads may result in
453      * new tasks being rejected or existing ones remaining stuck in
454      * the queue.
455      *
456      * We go further and preserve pool invariants even in the face of
457      * errors such as OutOfMemoryError, that might be thrown while
458      * trying to create threads.  Such errors are rather common due to
459      * the need to allocate a native stack in Thread.start, and users
460      * will want to perform clean pool shutdown to clean up.  There
461      * will likely be enough memory available for the cleanup code to
462      * complete without encountering yet another OutOfMemoryError.
463      */
464     private ThreadFactory threadFactory;
465 
466     /**
467      * Handler called when saturated or shutdown in execute.
468      */
469     private RejectedExecutionHandler handler;
470 
471     /**
472      * Timeout in nanoseconds for idle threads waiting for work.
473      * Threads use this timeout when there are more than corePoolSize
474      * present or if allowCoreThreadTimeOut. Otherwise they wait
475      * forever for new work.
476      */
477     private long keepAliveTime;
478 
479     /**
480      * If false (default), core threads stay alive even when idle.
481      * If true, core threads use keepAliveTime to time out waiting
482      * for work.
483      */
484     private bool _allowCoreThreadTimeOut;
485 
486     /**
487      * Core pool size is the minimum number of workers to keep alive
488      * (and not allow to time out etc) unless allowCoreThreadTimeOut
489      * is set, in which case the minimum is zero.
490      *
491      * Since the worker count is actually stored in COUNT_BITS bits,
492      * the effective limit is {@code corePoolSize & COUNT_MASK}.
493      */
494     private int corePoolSize;
495 
496     /**
497      * Maximum pool size.
498      *
499      * Since the worker count is actually stored in COUNT_BITS bits,
500      * the effective limit is {@code maximumPoolSize & COUNT_MASK}.
501      */
502     private int maximumPoolSize;
503 
504     /**
505      * Permission required for callers of shutdown and shutdownNow.
506      * We additionally require (see checkShutdownAccess) that callers
507      * have permission to actually interrupt threads in the worker set
508      * (as governed by Thread.interrupt, which relies on
509      * ThreadGroupEx.checkAccess, which in turn relies on
510      * SecurityManager.checkAccess). Shutdowns are attempted only if
511      * these checks pass.
512      *
513      * All actual invocations of Thread.interrupt (see
514      * interruptIdleWorkers and interruptWorkers) ignore
515      * SecurityExceptions, meaning that the attempted interrupts
516      * silently fail. In the case of shutdown, they should not fail
517      * unless the SecurityManager has inconsistent policies, sometimes
518      * allowing access to a thread and sometimes not. In such cases,
519      * failure to actually interrupt threads may disable or delay full
520      * termination. Other uses of interruptIdleWorkers are advisory,
521      * and failure to actually interrupt will merely delay response to
522      * configuration changes so is not handled exceptionally.
523      */
524     // private __gshared RuntimePermission shutdownPerm =
525     //     new RuntimePermission("modifyThread");
526 
527 
528     /**
529      * The default rejected execution handler.
530      */
531     private __gshared RejectedExecutionHandler defaultHandler;
532 
533 
534     shared static this() {
535         defaultHandler = new AbortPolicy();
536     }
537 
538     private void initialize() {
539         mainLock = new Mutex();
540         termination = new Condition(mainLock);
541         ctl = ctlOf(RUNNING, 0);
542         workers = new HashSet!(Worker)();
543     }
544 
545     // Packing and unpacking ctl
546     private static int runStateOf(int c)     { return c & ~COUNT_MASK; }
547     private static int workerCountOf(int c)  { return c & COUNT_MASK; }
548     private static int ctlOf(int rs, int wc) { return rs | wc; }
549 
550     /*
551      * Bit field accessors that don't require unpacking ctl.
552      * These depend on the bit layout and on workerCount being never negative.
553      */
554 
555     private static bool runStateLessThan(int c, int s) {
556         return c < s;
557     }
558 
559     private static bool runStateAtLeast(int c, int s) {
560         return c >= s;
561     }
562 
563     private static bool isRunning(int c) {
564         return c < SHUTDOWN;
565     }
566 
567     /**
568      * Attempts to CAS-increment the workerCount field of ctl.
569      */
570     private bool compareAndIncrementWorkerCount(int expect) {
571         return AtomicHelper.compareAndSet(ctl, expect, expect + 1);
572     }
573 
574     /**
575      * Attempts to CAS-decrement the workerCount field of ctl.
576      */
577     private bool compareAndDecrementWorkerCount(int expect) {
578         return AtomicHelper.compareAndSet(ctl, expect, expect - 1);
579     }
580 
581     /**
582      * Decrements the workerCount field of ctl. This is called only on
583      * abrupt termination of a thread (see processWorkerExit). Other
584      * decrements are performed within getTask.
585      */
586     private void decrementWorkerCount() {
587         AtomicHelper.decrement(ctl);
588     }
589     
590     /**
591      * Class Worker mainly maintains interrupt control state for
592      * threads running tasks, along with other minor bookkeeping.
593      * This class opportunistically extends AbstractQueuedSynchronizer
594      * to simplify acquiring and releasing a lock surrounding each
595      * task execution.  This protects against interrupts that are
596      * intended to wake up a worker thread waiting for a task from
597      * instead interrupting a task being run.  We implement a simple
598      * non-reentrant mutual exclusion lock rather than use
599      * Mutex because we do not want worker tasks to be able to
600      * reacquire the lock when they invoke pool control methods like
601      * setCorePoolSize.  Additionally, to suppress interrupts until
602      * the thread actually starts running tasks, we initialize lock
603      * state to a negative value, and clear it upon start (in
604      * runWorker).
605      */
606     private final class Worker : AbstractQueuedSynchronizer, Runnable
607     {
608         /** Thread this worker is running in.  Null if factory fails. */
609         Thread thread;
610         /** Initial task to run.  Possibly null. */
611         Runnable firstTask;
612         /** Per-thread task counter */
613         long completedTasks;
614 
615         // TODO: switch to AbstractQueuedLongSynchronizer and move
616         // completedTasks into the lock word.
617 
618         /**
619          * Creates with given first task and thread from ThreadFactory.
620          * @param firstTask the first task (null if none)
621          */
622         this(Runnable firstTask) {
623             setState(-1); // inhibit interrupts until runWorker
624             this.firstTask = firstTask;
625             this.thread = getThreadFactory().newThread(new class Runnable {
626                 void run() {
627                     runWorker(this.outer);
628                 }
629             });
630         }
631 
632         /** Delegates main run loop to outer runWorker. */
633         void run() {
634             runWorker(this);
635         }
636 
637         // Lock methods
638         //
639         // The value 0 represents the unlocked state.
640         // The value 1 represents the locked state.
641 
642         protected override bool isHeldExclusively() {
643             return getState() != 0;
644         }
645 
646         protected override bool tryAcquire(int unused) {
647             if (compareAndSetState(0, 1)) { 
648                 setExclusiveOwnerThread(Thread.getThis());
649                 return true;
650             }
651             return false;
652         }
653 
654         protected override bool tryRelease(int unused) {
655             setExclusiveOwnerThread(null);
656             setState(0);
657             return true;
658         }
659 
660         void lock()        { acquire(1); }
661         bool tryLock()  { return tryAcquire(1); }
662         void unlock()      { release(1); }
663         bool isLocked() { return isHeldExclusively(); }
664 
665         void interruptIfStarted() {
666             ThreadEx t;
667             if (getState() >= 0 && (t = cast(ThreadEx)thread) !is null && !t.isInterrupted()) {
668                 try {
669                     t.interrupt();
670                 } catch (Exception ignore) {
671                     version(HUNT_DEBUG) warning(ignore.msg);
672                 }
673             }
674         }
675     }
676 
677     /*
678      * Methods for setting control state
679      */
680 
681     /**
682      * Transitions runState to given target, or leaves it alone if
683      * already at least the given target.
684      *
685      * @param targetState the desired state, either SHUTDOWN or STOP
686      *        (but not TIDYING or TERMINATED -- use tryTerminate for that)
687      */
688     private void advanceRunState(int targetState) {
689         // assert targetState == SHUTDOWN || targetState == STOP;
690         for (;;) {
691             int c = ctl;
692             if (runStateAtLeast(c, targetState) ||
693                 AtomicHelper.compareAndSet(ctl, c, ctlOf(targetState, workerCountOf(c))))
694                 break;
695         }
696     }
697 
698     /**
699      * Transitions to TERMINATED state if either (SHUTDOWN and pool
700      * and queue empty) or (STOP and pool empty).  If otherwise
701      * eligible to terminate but workerCount is nonzero, interrupts an
702      * idle worker to ensure that shutdown signals propagate. This
703      * method must be called following any action that might make
704      * termination possible -- reducing worker count or removing tasks
705      * from the queue during shutdown. The method is non-private to
706      * allow access from ScheduledThreadPoolExecutor.
707      */
708     final void tryTerminate() {
709         for (;;) {
710             int c = ctl;
711             if (isRunning(c) ||
712                 runStateAtLeast(c, TIDYING) ||
713                 (runStateLessThan(c, STOP) && ! workQueue.isEmpty()))
714                 return;
715             if (workerCountOf(c) != 0) { // Eligible to terminate
716                 interruptIdleWorkers(ONLY_ONE);
717                 return;
718             }
719 
720             Mutex mainLock = this.mainLock;
721             mainLock.lock();
722             try {
723                 if (AtomicHelper.compareAndSet(ctl, c, ctlOf(TIDYING, 0))) {
724                     try {
725                         terminated();
726                     } finally {
727                         ctl = ctlOf(TERMINATED, 0);
728                         termination.notifyAll();
729                     }
730                     return;
731                 }
732             } finally {
733                 mainLock.unlock();
734             }
735             // else retry on failed CAS
736         }
737     }
738 
739     /*
740      * Methods for controlling interrupts to worker threads.
741      */
742 
743     /**
744      * If there is a security manager, makes sure caller has
745      * permission to shut down threads in general (see shutdownPerm).
746      * If this passes, additionally makes sure the caller is allowed
747      * to interrupt each worker thread. This might not be true even if
748      * first check passed, if the SecurityManager treats some threads
749      * specially.
750      */
751     private void checkShutdownAccess() {
752         // FIXME: Needing refactor or cleanup -@zxp at 1/2/2019, 2:12:25 AM
753         // remove this
754         // debug implementationMissing(false);
755         // assert mainLock.isHeldByCurrentThread();
756         // SecurityManager security = System.getSecurityManager();
757         // if (security !is null) {
758         //     security.checkPermission(shutdownPerm);
759         //     for (Worker w : workers)
760         //         security.checkAccess(w.thread);
761         // }
762     }
763 
764     /**
765      * Interrupts all threads, even if active. Ignores SecurityExceptions
766      * (in which case some threads may remain uninterrupted).
767      */
768     private void interruptWorkers() {
769         // assert mainLock.isHeldByCurrentThread();
770         foreach (Worker w ; workers)
771             w.interruptIfStarted();
772     }
773 
774     /**
775      * Interrupts threads that might be waiting for tasks (as
776      * indicated by not being locked) so they can check for
777      * termination or configuration changes. Ignores
778      * SecurityExceptions (in which case some threads may remain
779      * uninterrupted).
780      *
781      * @param onlyOne If true, interrupt at most one worker. This is
782      * called only from tryTerminate when termination is otherwise
783      * enabled but there are still other workers.  In this case, at
784      * most one waiting worker is interrupted to propagate shutdown
785      * signals in case all threads are currently waiting.
786      * Interrupting any arbitrary thread ensures that newly arriving
787      * workers since shutdown began will also eventually exit.
788      * To guarantee eventual termination, it suffices to always
789      * interrupt only one idle worker, but shutdown() interrupts all
790      * idle workers so that redundant workers exit promptly, not
791      * waiting for a straggler task to finish.
792      */
793     private void interruptIdleWorkers(bool onlyOne) {
794         Mutex mainLock = this.mainLock;
795         mainLock.lock();
796         try {
797             foreach(Worker w ; workers) {
798                 ThreadEx t = cast(ThreadEx)w.thread;
799                 if (t !is null && !t.isInterrupted() && w.tryLock()) {
800                     try {
801                         t.interrupt();
802                     } catch (Exception ignore) {
803                         version(HUNT_DEBUG) {
804                             warning(ignore.toString());
805                         }
806                     } finally {
807                         w.unlock();
808                     }
809                 }
810                 if (onlyOne)
811                     break;
812             }
813         } finally {
814             mainLock.unlock();
815         }
816     }
817 
818     /**
819      * Common form of interruptIdleWorkers, to avoid having to
820      * remember what the bool argument means.
821      */
822     private void interruptIdleWorkers() {
823         interruptIdleWorkers(false);
824     }
825 
826     private enum bool ONLY_ONE = true;
827 
828     /*
829      * Misc utilities, most of which are also exported to
830      * ScheduledThreadPoolExecutor
831      */
832 
833     /**
834      * Invokes the rejected execution handler for the given command.
835      * Package-protected for use by ScheduledThreadPoolExecutor.
836      */
837     final void reject(Runnable command) {
838         handler.rejectedExecution(command, this);
839     }
840 
841     /**
842      * Performs any further cleanup following run state transition on
843      * invocation of shutdown.  A no-op here, but used by
844      * ScheduledThreadPoolExecutor to cancel delayed tasks.
845      */
846     void onShutdown() {
847     }
848 
849     /**
850      * Drains the task queue into a new list, normally using
851      * drainTo. But if the queue is a DelayQueue or any other kind of
852      * queue for which poll or drainTo may fail to remove some
853      * elements, it deletes them one by one.
854      */
855     private List!(Runnable) drainQueue() {
856         BlockingQueue!(Runnable) q = workQueue;
857         ArrayList!(Runnable) taskList = new ArrayList!(Runnable)();
858         q.drainTo(taskList);
859         if (!q.isEmpty()) {
860             foreach (Runnable r ; q.toArray()) {
861                 if (q.remove(r))
862                     taskList.add(r);
863             }
864         }
865         return taskList;
866     }
867 
868     /*
869      * Methods for creating, running and cleaning up after workers
870      */
871 
872     /**
873      * Checks if a new worker can be added with respect to current
874      * pool state and the given bound (either core or maximum). If so,
875      * the worker count is adjusted accordingly, and, if possible, a
876      * new worker is created and started, running firstTask as its
877      * first task. This method returns false if the pool is stopped or
878      * eligible to shut down. It also returns false if the thread
879      * factory fails to create a thread when asked.  If the thread
880      * creation fails, either due to the thread factory returning
881      * null, or due to an exception (typically OutOfMemoryError in
882      * Thread.start()), we roll back cleanly.
883      *
884      * @param firstTask the task the new thread should run first (or
885      * null if none). Workers are created with an initial first task
886      * (in method execute()) to bypass queuing when there are fewer
887      * than corePoolSize threads (in which case we always start one),
888      * or when the queue is full (in which case we must bypass queue).
889      * Initially idle threads are usually created via
890      * prestartCoreThread or to replace other dying workers.
891      *
892      * @param core if true use corePoolSize as bound, else
893      * maximumPoolSize. (A bool indicator is used here rather than a
894      * value to ensure reads of fresh values after checking other pool
895      * state).
896      * @return true if successful
897      */
898     private bool addWorker(Runnable firstTask, bool core) {
899         retry:
900         for (int c = ctl;;) {
901             // Check if queue empty only if necessary.
902             if (runStateAtLeast(c, SHUTDOWN)
903                 && (runStateAtLeast(c, STOP)
904                     || firstTask !is null
905                     || workQueue.isEmpty()))
906                 return false;
907 
908             for (;;) {
909                 if (workerCountOf(c)
910                     >= ((core ? corePoolSize : maximumPoolSize) & COUNT_MASK))
911                     return false;
912                 if (compareAndIncrementWorkerCount(c))
913                     break retry;
914                 c = ctl;  // Re-read ctl
915                 if (runStateAtLeast(c, SHUTDOWN))
916                     continue retry;
917                 // else CAS failed due to workerCount change; retry inner loop
918             }
919         }
920 
921         bool workerStarted = false;
922         bool workerAdded = false;
923         Worker w = null;
924         try {
925             w = new Worker(firstTask);
926             Thread t = w.thread;
927             if (t !is null) {
928                 Mutex mainLock = this.mainLock;
929                 mainLock.lock();
930                 try {
931                     // Recheck while holding lock.
932                     // Back out on ThreadFactory failure or if
933                     // shut down before lock acquired.
934                     int c = ctl;
935 
936                     if (isRunning(c) ||
937                         (runStateLessThan(c, STOP) && firstTask is null)) {
938                         // implementationMissing(false);
939                         // TODO: Tasks pending completion -@zxp at 10/18/2018, 9:14:13 AM
940                         // 
941                         // if (t.isAlive()) // precheck that t is startable
942                         //     throw new IllegalThreadStateException();
943                         workers.add(w);
944                         int s = workers.size();
945                         if (s > largestPoolSize)
946                             largestPoolSize = s;
947                         workerAdded = true;
948                     }
949                 } finally {
950                     mainLock.unlock();
951                 }
952                 if (workerAdded) {
953                     t.start();
954                     workerStarted = true;
955                 }
956             }
957         } finally {
958             if (! workerStarted)
959                 addWorkerFailed(w);
960         }
961         return workerStarted;
962     }
963 
964     /**
965      * Rolls back the worker thread creation.
966      * - removes worker from workers, if present
967      * - decrements worker count
968      * - rechecks for termination, in case the existence of this
969      *   worker was holding up termination
970      */
971     private void addWorkerFailed(Worker w) {
972         Mutex mainLock = this.mainLock;
973         mainLock.lock();
974         try {
975             if (w !is null)
976                 workers.remove(w);
977             decrementWorkerCount();
978             tryTerminate();
979         } finally {
980             mainLock.unlock();
981         }
982     }
983 
984     /**
985      * Performs cleanup and bookkeeping for a dying worker. Called
986      * only from worker threads. Unless completedAbruptly is set,
987      * assumes that workerCount has already been adjusted to account
988      * for exit.  This method removes thread from worker set, and
989      * possibly terminates the pool or replaces the worker if either
990      * it exited due to user task exception or if fewer than
991      * corePoolSize workers are running or queue is non-empty but
992      * there are no workers.
993      *
994      * @param w the worker
995      * @param completedAbruptly if the worker died due to user exception
996      */
997     private void processWorkerExit(Worker w, bool completedAbruptly) {
998         if (completedAbruptly) // If abrupt, then workerCount wasn't adjusted
999             decrementWorkerCount();
1000 
1001         Mutex mainLock = this.mainLock;
1002         mainLock.lock();
1003         try {
1004             completedTaskCount += w.completedTasks;
1005             workers.remove(w);
1006         } finally {
1007             mainLock.unlock();
1008         }
1009 
1010         tryTerminate();
1011 
1012         int c = ctl;
1013         if (runStateLessThan(c, STOP)) {
1014             if (!completedAbruptly) {
1015                 int min = _allowCoreThreadTimeOut ? 0 : corePoolSize;
1016                 if (min == 0 && ! workQueue.isEmpty())
1017                     min = 1;
1018                 if (workerCountOf(c) >= min)
1019                     return; // replacement not needed
1020             }
1021             addWorker(null, false);
1022         }
1023     }
1024 
1025     /**
1026      * Performs blocking or timed wait for a task, depending on
1027      * current configuration settings, or returns null if this worker
1028      * must exit because of any of:
1029      * 1. There are more than maximumPoolSize workers (due to
1030      *    a call to setMaximumPoolSize).
1031      * 2. The pool is stopped.
1032      * 3. The pool is shutdown and the queue is empty.
1033      * 4. This worker timed out waiting for a task, and timed-out
1034      *    workers are subject to termination (that is,
1035      *    {@code allowCoreThreadTimeOut || workerCount > corePoolSize})
1036      *    both before and after the timed wait, and if the queue is
1037      *    non-empty, this worker is not the last thread in the pool.
1038      *
1039      * @return task, or null if the worker must exit, in which case
1040      *         workerCount is decremented
1041      */
1042     private Runnable getTask() {
1043         bool timedOut = false; // Did the last poll() time out?
1044 
1045         for (;;) {
1046             int c = ctl;
1047 
1048             // Check if queue empty only if necessary.
1049             if (runStateAtLeast(c, SHUTDOWN)
1050                 && (runStateAtLeast(c, STOP) || workQueue.isEmpty())) {
1051                 decrementWorkerCount();
1052                 return null;
1053             }
1054 
1055             int wc = workerCountOf(c);
1056 
1057             // Are workers subject to culling?
1058             bool timed = _allowCoreThreadTimeOut || wc > corePoolSize;
1059 
1060             if ((wc > maximumPoolSize || (timed && timedOut))
1061                 && (wc > 1 || workQueue.isEmpty())) {
1062                 if (compareAndDecrementWorkerCount(c))
1063                     return null;
1064                 continue;
1065             }
1066 
1067             try {
1068                 Runnable r = timed ?
1069                     workQueue.poll(dur!(TimeUnit.HectoNanosecond)(keepAliveTime)) :
1070                     workQueue.take();
1071                 if (r !is null)
1072                     return r;
1073                 timedOut = true;
1074             } catch (InterruptedException retry) {
1075                 timedOut = false;
1076             }
1077         }
1078     }
1079 
1080     /**
1081      * Main worker run loop.  Repeatedly gets tasks from queue and
1082      * executes them, while coping with a number of issues:
1083      *
1084      * 1. We may start out with an initial task, in which case we
1085      * don't need to get the first one. Otherwise, as long as pool is
1086      * running, we get tasks from getTask. If it returns null then the
1087      * worker exits due to changed pool state or configuration
1088      * parameters.  Other exits result from exception throws in
1089      * external code, in which case completedAbruptly holds, which
1090      * usually leads processWorkerExit to replace this thread.
1091      *
1092      * 2. Before running any task, the lock is acquired to prevent
1093      * other pool interrupts while the task is executing, and then we
1094      * ensure that unless pool is stopping, this thread does not have
1095      * its interrupt set.
1096      *
1097      * 3. Each task run is preceded by a call to beforeExecute, which
1098      * might throw an exception, in which case we cause thread to die
1099      * (breaking loop with completedAbruptly true) without processing
1100      * the task.
1101      *
     * 4. Assuming beforeExecute completes normally, we run the task,
     * passing any Throwable it throws to afterExecute before rethrowing
     * it out of the worker. Any thrown exception conservatively causes
     * the thread to die.
     *
     * 5. After task.run completes, we call afterExecute, which may
     * also throw an exception, which will also cause the thread to
     * die. If both task.run and afterExecute throw, the exception from
     * afterExecute is the one that propagates.
     *
     * The net effect of the exception mechanics is that afterExecute
     * and the code that ultimately observes the rethrown exception have
     * as accurate information as we can provide about any problems
     * encountered by user code.
1120      *
1121      * @param w the worker
1122      */
1123     final void runWorker(Worker w) {
1124         Thread wt = Thread.getThis();
1125         Runnable task = w.firstTask;
1126         w.firstTask = null;
1127         w.unlock(); // allow interrupts
1128         bool completedAbruptly = true;
1129         try {
1130             while (task !is null || (task = getTask()) !is null) {
1131                 w.lock();
1132                 // If pool is stopping, ensure thread is interrupted;
1133                 // if not, ensure thread is not interrupted.  This
1134                 // requires a recheck in second case to deal with
1135                 // shutdownNow race while clearing interrupt
1136 
1137                 // implementationMissing(false);
1138                 // if ((runStateAtLeast(ctl, STOP) ||
1139                 //      (Thread.interrupted() &&
1140                 //       runStateAtLeast(ctl, STOP))) &&
1141                 //     !wt.isInterrupted())
1142                 //     wt.interrupt();
1143                 try {
1144                     beforeExecute(wt, task);
1145                     try {
1146                         task.run();
1147                         afterExecute(task, null);
1148                     } catch (Throwable ex) {
1149                         afterExecute(task, ex);
1150                         throw ex;
1151                     }
1152                 } finally {
1153                     task = null;
1154                     w.completedTasks++;
1155                     w.unlock();
1156                 }
1157             }
1158             completedAbruptly = false;
1159         } finally {
1160             processWorkerExit(w, completedAbruptly);
1161         }
1162     }
1163 
1164     // constructors and methods
1165 
1166     /**
1167      * Creates a new {@code ThreadPoolExecutor} with the given initial
1168      * parameters, the default thread factory and the default rejected
1169      * execution handler.
1170      *
1171      * <p>It may be more convenient to use one of the {@link Executors}
1172      * factory methods instead of this general purpose constructor.
1173      *
1174      * @param corePoolSize the number of threads to keep in the pool, even
1175      *        if they are idle, unless {@code allowCoreThreadTimeOut} is set
1176      * @param maximumPoolSize the maximum number of threads to allow in the
1177      *        pool
1178      * @param keepAliveTime when the number of threads is greater than
1179      *        the core, this is the maximum time that excess idle threads
1180      *        will wait for new tasks before terminating.
1181      * @param workQueue the queue to use for holding tasks before they are
1182      *        executed.  This queue will hold only the {@code Runnable}
1183      *        tasks submitted by the {@code execute} method.
1184      * @throws IllegalArgumentException if one of the following holds:<br>
1185      *         {@code corePoolSize < 0}<br>
1186      *         {@code keepAliveTime < 0}<br>
1187      *         {@code maximumPoolSize <= 0}<br>
1188      *         {@code maximumPoolSize < corePoolSize}
1189      * @throws NullPointerException if {@code workQueue} is null
1190      */
1191     this(int corePoolSize, int maximumPoolSize, Duration keepAliveTime,
1192         BlockingQueue!(Runnable) workQueue) {
1193         this(corePoolSize, maximumPoolSize, keepAliveTime, workQueue,
1194              ThreadFactory.defaultThreadFactory(), defaultHandler);
1195     }
1196 
1197     /**
1198      * Creates a new {@code ThreadPoolExecutor} with the given initial
1199      * parameters and {@linkplain ThreadPoolExecutor.AbortPolicy
1200      * default rejected execution handler}.
1201      *
1202      * @param corePoolSize the number of threads to keep in the pool, even
1203      *        if they are idle, unless {@code allowCoreThreadTimeOut} is set
1204      * @param maximumPoolSize the maximum number of threads to allow in the
1205      *        pool
1206      * @param keepAliveTime when the number of threads is greater than
1207      *        the core, this is the maximum time that excess idle threads
1208      *        will wait for new tasks before terminating.
1209      * @param workQueue the queue to use for holding tasks before they are
1210      *        executed.  This queue will hold only the {@code Runnable}
1211      *        tasks submitted by the {@code execute} method.
1212      * @param threadFactory the factory to use when the executor
1213      *        creates a new thread
1214      * @throws IllegalArgumentException if one of the following holds:<br>
1215      *         {@code corePoolSize < 0}<br>
1216      *         {@code keepAliveTime < 0}<br>
1217      *         {@code maximumPoolSize <= 0}<br>
1218      *         {@code maximumPoolSize < corePoolSize}
1219      * @throws NullPointerException if {@code workQueue}
1220      *         or {@code threadFactory} is null
1221      */
1222     this(int corePoolSize, int maximumPoolSize, Duration keepAliveTime, 
1223          BlockingQueue!(Runnable) workQueue, ThreadFactory threadFactory) {
1224         this(corePoolSize, maximumPoolSize, keepAliveTime, workQueue,
1225              threadFactory, defaultHandler);
1226     }
1227 
1228     /**
1229      * Creates a new {@code ThreadPoolExecutor} with the given initial
1230      * parameters and
1231      * {@linkplain ThreadFactory#defaultThreadFactory default thread factory}.
1232      *
1233      * @param corePoolSize the number of threads to keep in the pool, even
1234      *        if they are idle, unless {@code allowCoreThreadTimeOut} is set
1235      * @param maximumPoolSize the maximum number of threads to allow in the
1236      *        pool
1237      * @param keepAliveTime when the number of threads is greater than
1238      *        the core, this is the maximum time that excess idle threads
1239      *        will wait for new tasks before terminating.
1240      * @param workQueue the queue to use for holding tasks before they are
1241      *        executed.  This queue will hold only the {@code Runnable}
1242      *        tasks submitted by the {@code execute} method.
1243      * @param handler the handler to use when execution is blocked
1244      *        because the thread bounds and queue capacities are reached
1245      * @throws IllegalArgumentException if one of the following holds:<br>
1246      *         {@code corePoolSize < 0}<br>
1247      *         {@code keepAliveTime < 0}<br>
1248      *         {@code maximumPoolSize <= 0}<br>
1249      *         {@code maximumPoolSize < corePoolSize}
1250      * @throws NullPointerException if {@code workQueue}
1251      *         or {@code handler} is null
1252      */
1253     this(int corePoolSize, int maximumPoolSize, Duration keepAliveTime, 
1254         BlockingQueue!(Runnable) workQueue, RejectedExecutionHandler handler) {
1255         this(corePoolSize, maximumPoolSize, keepAliveTime, workQueue,
1256              ThreadFactory.defaultThreadFactory(), handler);
1257     }
1258 
1259     /**
1260      * Creates a new {@code ThreadPoolExecutor} with the given initial
1261      * parameters.
1262      *
1263      * @param corePoolSize the number of threads to keep in the pool, even
1264      *        if they are idle, unless {@code allowCoreThreadTimeOut} is set
1265      * @param maximumPoolSize the maximum number of threads to allow in the
1266      *        pool
1267      * @param keepAliveTime when the number of threads is greater than
1268      *        the core, this is the maximum time that excess idle threads
1269      *        will wait for new tasks before terminating.
1270      * @param workQueue the queue to use for holding tasks before they are
1271      *        executed.  This queue will hold only the {@code Runnable}
1272      *        tasks submitted by the {@code execute} method.
1273      * @param threadFactory the factory to use when the executor
1274      *        creates a new thread
1275      * @param handler the handler to use when execution is blocked
1276      *        because the thread bounds and queue capacities are reached
1277      * @throws IllegalArgumentException if one of the following holds:<br>
1278      *         {@code corePoolSize < 0}<br>
1279      *         {@code keepAliveTime < 0}<br>
1280      *         {@code maximumPoolSize <= 0}<br>
1281      *         {@code maximumPoolSize < corePoolSize}
1282      * @throws NullPointerException if {@code workQueue}
1283      *         or {@code threadFactory} or {@code handler} is null
1284      */
1285     this(int corePoolSize, int maximumPoolSize, Duration keepAliveTime,
1286             BlockingQueue!(Runnable) workQueue,
1287             ThreadFactory threadFactory, RejectedExecutionHandler handler) {
1288 
1289         initialize();
1290         this.keepAliveTime = keepAliveTime.total!(TimeUnit.HectoNanosecond)();
1291         if (corePoolSize < 0 || maximumPoolSize <= 0 || 
1292             maximumPoolSize < corePoolSize || this.keepAliveTime < 0)
1293             throw new IllegalArgumentException();
1294 
1295         if (workQueue is null || threadFactory is null || handler is null)
1296             throw new NullPointerException();
1297 
1298         this.corePoolSize = corePoolSize;
1299         this.maximumPoolSize = maximumPoolSize;
1300         this.workQueue = workQueue;
1301         this.threadFactory = threadFactory;
1302         this.handler = handler;
1303     }
1304 
1305     /**
1306      * Executes the given task sometime in the future.  The task
1307      * may execute in a new thread or in an existing pooled thread.
1308      *
1309      * If the task cannot be submitted for execution, either because this
1310      * executor has been shutdown or because its capacity has been reached,
1311      * the task is handled by the current {@link RejectedExecutionHandler}.
1312      *
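     * <p>A usage sketch, assuming {@code pool} is an already constructed
     * executor; the anonymous class is one convenient way to adapt a block of
     * code to {@code Runnable}, and {@code trace} is assumed to come from
     * hunt.logging.ConsoleLogger:
     *
     * <pre> {@code
     * pool.execute(new class Runnable {
     *     void run() { trace("task running"); }
     * });
     * }</pre>
     *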
1313      * @param command the task to execute
1314      * @throws RejectedExecutionException at discretion of
1315      *         {@code RejectedExecutionHandler}, if the task
1316      *         cannot be accepted for execution
1317      * @throws NullPointerException if {@code command} is null
1318      */
1319     void execute(Runnable command) {
1320         if (command is null)
1321             throw new NullPointerException();
1322         /*
1323          * Proceed in 3 steps:
1324          *
1325          * 1. If fewer than corePoolSize threads are running, try to
1326          * start a new thread with the given command as its first
1327          * task.  The call to addWorker atomically checks runState and
1328          * workerCount, and so prevents false alarms that would add
1329          * threads when it shouldn't, by returning false.
1330          *
1331          * 2. If a task can be successfully queued, then we still need
1332          * to double-check whether we should have added a thread
1333          * (because existing ones died since last checking) or that
1334          * the pool shut down since entry into this method. So we
1335          * recheck state and if necessary roll back the enqueuing if
1336          * stopped, or start a new thread if there are none.
1337          *
         * 3. If we cannot queue the task, then we try to add a new
1339          * thread.  If it fails, we know we are shut down or saturated
1340          * and so reject the task.
1341          */
1342         int c = ctl;
1343         if (workerCountOf(c) < corePoolSize) {
1344             if (addWorker(command, true))
1345                 return;
1346             c = ctl;
1347         }
1348         if (isRunning(c) && workQueue.offer(command)) {
1349             int recheck = ctl;
1350             if (! isRunning(recheck) && remove(command))
1351                 reject(command);
1352             else if (workerCountOf(recheck) == 0)
1353                 addWorker(null, false);
1354         }
1355         else if (!addWorker(command, false))
1356             reject(command);
1357     }
1358 
1359     /**
1360      * Initiates an orderly shutdown in which previously submitted
1361      * tasks are executed, but no new tasks will be accepted.
1362      * Invocation has no additional effect if already shut down.
1363      *
1364      * <p>This method does not wait for previously submitted tasks to
1365      * complete execution.  Use {@link #awaitTermination awaitTermination}
1366      * to do that.
1367      *
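     * <p>A typical two-phase shutdown sketch, assuming {@code pool} is an
     * already constructed executor; the grace period is arbitrary and
     * {@code seconds} is assumed from core.time:
     *
     * <pre> {@code
     * pool.shutdown();                        // stop accepting new tasks
     * if (!pool.awaitTermination(5.seconds))  // wait for in-flight work
     *     pool.shutdownNow();                 // interrupt stragglers
     * }</pre>
     *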
1368      * @throws SecurityException {@inheritDoc}
1369      */
1370     void shutdown() {
1371         Mutex mainLock = this.mainLock;
1372         mainLock.lock();
1373         try {
1374             checkShutdownAccess();
1375             advanceRunState(SHUTDOWN);
1376             interruptIdleWorkers();
1377             onShutdown(); // hook for ScheduledThreadPoolExecutor
1378         } finally {
1379             mainLock.unlock();
1380         }
1381         tryTerminate();
1382     }
1383 
1384     /**
1385      * Attempts to stop all actively executing tasks, halts the
1386      * processing of waiting tasks, and returns a list of the tasks
1387      * that were awaiting execution. These tasks are drained (removed)
1388      * from the task queue upon return from this method.
1389      *
1390      * <p>This method does not wait for actively executing tasks to
1391      * terminate.  Use {@link #awaitTermination awaitTermination} to
1392      * do that.
1393      *
1394      * <p>There are no guarantees beyond best-effort attempts to stop
1395      * processing actively executing tasks.  This implementation
1396      * interrupts tasks via {@link Thread#interrupt}; any task that
1397      * fails to respond to interrupts may never terminate.
1398      *
1399      * @throws SecurityException {@inheritDoc}
1400      */
1401     List!(Runnable) shutdownNow() {
1402         List!(Runnable) tasks;
1403         Mutex mainLock = this.mainLock;
1404         mainLock.lock();
1405         try {
1406             checkShutdownAccess();
1407             advanceRunState(STOP);
1408             interruptWorkers();
1409             tasks = drainQueue();
1410         } finally {
1411             mainLock.unlock();
1412         }
1413         tryTerminate();
1414         return tasks;
1415     }
1416 
1417     bool isShutdown() {
1418         return runStateAtLeast(ctl, SHUTDOWN);
1419     }
1420 
1421     /** Used by ScheduledThreadPoolExecutor. */
1422     bool isStopped() {
1423         return runStateAtLeast(ctl, STOP);
1424     }
1425 
1426     /**
1427      * Returns true if this executor is in the process of terminating
1428      * after {@link #shutdown} or {@link #shutdownNow} but has not
1429      * completely terminated.  This method may be useful for
1430      * debugging. A return of {@code true} reported a sufficient
1431      * period after shutdown may indicate that submitted tasks have
1432      * ignored or suppressed interruption, causing this executor not
1433      * to properly terminate.
1434      *
1435      * @return {@code true} if terminating but not yet terminated
1436      */
1437     bool isTerminating() {
1438         int c = ctl;
1439         return runStateAtLeast(c, SHUTDOWN) && runStateLessThan(c, TERMINATED);
1440     }
1441 
1442     bool isTerminated() {
1443         return runStateAtLeast(ctl, TERMINATED);
1444     }
1445 
    bool awaitTermination(Duration timeout) {
        Mutex mainLock = this.mainLock;
        mainLock.lock();
        try {
            while (runStateLessThan(ctl, TERMINATED)) {
                // Condition.wait(Duration) returns false when the timeout
                // elapses without a notification; in that case the pool has
                // not terminated in time. Note that the full timeout is
                // re-applied on every wakeup, so the total wait may exceed
                // the requested duration if termination is signalled late.
                if (!termination.wait(timeout))
                    return false;
            }
            return true;
        } finally {
            mainLock.unlock();
        }
    }
1465 
1466     // Override without "throws Throwable" for compatibility with subclasses
1467     // whose finalize method invokes super.finalize() (as is recommended).
1468     // Before JDK 11, finalize() had a non-empty method body.
1469 
1470     /**
1471      * @implNote Previous versions of this class had a finalize method
1472      * that shut down this executor, but in this version, finalize
1473      * does nothing.
1474      */
1475     //@Deprecated(since="9")
1476     protected void finalize() {}
1477 
1478     /**
1479      * Sets the thread factory used to create new threads.
1480      *
1481      * @param threadFactory the new thread factory
1482      * @throws NullPointerException if threadFactory is null
1483      * @see #getThreadFactory
1484      */
1485     void setThreadFactory(ThreadFactory threadFactory) {
1486         if (threadFactory is null)
1487             throw new NullPointerException();
1488         this.threadFactory = threadFactory;
1489     }
1490 
1491     /**
1492      * Returns the thread factory used to create new threads.
1493      *
1494      * @return the current thread factory
1495      * @see #setThreadFactory(ThreadFactory)
1496      */
1497     ThreadFactory getThreadFactory() {
1498         return threadFactory;
1499     }
1500 
1501     /**
1502      * Sets a new handler for unexecutable tasks.
1503      *
1504      * @param handler the new handler
1505      * @throws NullPointerException if handler is null
1506      * @see #getRejectedExecutionHandler
1507      */
1508     void setRejectedExecutionHandler(RejectedExecutionHandler handler) {
1509         if (handler is null)
1510             throw new NullPointerException();
1511         this.handler = handler;
1512     }
1513 
1514     /**
1515      * Returns the current handler for unexecutable tasks.
1516      *
1517      * @return the current handler
1518      * @see #setRejectedExecutionHandler(RejectedExecutionHandler)
1519      */
1520     RejectedExecutionHandler getRejectedExecutionHandler() {
1521         return handler;
1522     }
1523 
1524     /**
1525      * Sets the core number of threads.  This overrides any value set
1526      * in the constructor.  If the new value is smaller than the
1527      * current value, excess existing threads will be terminated when
1528      * they next become idle.  If larger, new threads will, if needed,
1529      * be started to execute any queued tasks.
1530      *
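     * <p>For example, a pool can be grown at runtime (sketch only; {@code pool}
     * is assumed to have been constructed elsewhere):
     *
     * <pre> {@code
     * pool.setMaximumPoolSize(16); // raise the ceiling first
     * pool.setCorePoolSize(8);     // may prestart workers for queued tasks
     * }</pre>
     *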
1531      * @param corePoolSize the new core size
1532      * @throws IllegalArgumentException if {@code corePoolSize < 0}
1533      *         or {@code corePoolSize} is greater than the {@linkplain
1534      *         #getMaximumPoolSize() maximum pool size}
1535      * @see #getCorePoolSize
1536      */
1537     void setCorePoolSize(int corePoolSize) {
1538         if (corePoolSize < 0 || maximumPoolSize < corePoolSize)
1539             throw new IllegalArgumentException();
1540         int delta = corePoolSize - this.corePoolSize;
1541         this.corePoolSize = corePoolSize;
1542         if (workerCountOf(ctl) > corePoolSize)
1543             interruptIdleWorkers();
1544         else if (delta > 0) {
1545             // We don't really know how many new threads are "needed".
1546             // As a heuristic, prestart enough new workers (up to new
1547             // core size) to handle the current number of tasks in
1548             // queue, but stop if queue becomes empty while doing so.
1549             int k = min(delta, workQueue.size());
1550             while (k-- > 0 && addWorker(null, true)) {
1551                 if (workQueue.isEmpty())
1552                     break;
1553             }
1554         }
1555     }
1556 
1557     /**
1558      * Returns the core number of threads.
1559      *
1560      * @return the core number of threads
1561      * @see #setCorePoolSize
1562      */
1563     int getCorePoolSize() {
1564         return corePoolSize;
1565     }
1566 
1567     /**
1568      * Starts a core thread, causing it to idly wait for work. This
1569      * overrides the default policy of starting core threads only when
1570      * new tasks are executed. This method will return {@code false}
1571      * if all core threads have already been started.
1572      *
1573      * @return {@code true} if a thread was started
1574      */
1575     bool prestartCoreThread() {
1576         return workerCountOf(ctl) < corePoolSize &&
1577             addWorker(null, true);
1578     }
1579 
1580     /**
1581      * Same as prestartCoreThread except arranges that at least one
1582      * thread is started even if corePoolSize is 0.
1583      */
1584     void ensurePrestart() {
1585         int wc = workerCountOf(ctl);
1586         if (wc < corePoolSize)
1587             addWorker(null, true);
1588         else if (wc == 0)
1589             addWorker(null, false);
1590     }
1591 
1592     /**
1593      * Starts all core threads, causing them to idly wait for work. This
1594      * overrides the default policy of starting core threads only when
1595      * new tasks are executed.
1596      *
1597      * @return the number of threads started
1598      */
1599     int prestartAllCoreThreads() {
1600         int n = 0;
1601         while (addWorker(null, true))
1602             ++n;
1603         return n;
1604     }
1605 
1606     /**
1607      * Returns true if this pool allows core threads to time out and
1608      * terminate if no tasks arrive within the keepAlive time, being
1609      * replaced if needed when new tasks arrive. When true, the same
1610      * keep-alive policy applying to non-core threads applies also to
1611      * core threads. When false (the default), core threads are never
1612      * terminated due to lack of incoming tasks.
1613      *
1614      * @return {@code true} if core threads are allowed to time out,
1615      *         else {@code false}
1616      *
1617      */
1618     bool allowsCoreThreadTimeOut() {
1619         return _allowCoreThreadTimeOut;
1620     }
1621 
1622     /**
1623      * Sets the policy governing whether core threads may time out and
1624      * terminate if no tasks arrive within the keep-alive time, being
1625      * replaced if needed when new tasks arrive. When false, core
1626      * threads are never terminated due to lack of incoming
1627      * tasks. When true, the same keep-alive policy applying to
1628      * non-core threads applies also to core threads. To avoid
1629      * continual thread replacement, the keep-alive time must be
1630      * greater than zero when setting {@code true}. This method
1631      * should in general be called before the pool is actively used.
1632      *
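     * <p>A typical configuration sketch, assuming {@code pool} is an already
     * constructed executor and {@code seconds} comes from core.time:
     *
     * <pre> {@code
     * pool.setKeepAliveTime(60.seconds);
     * pool.allowCoreThreadTimeOut(true); // idle core threads may now expire
     * }</pre>
     *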
1633      * @param value {@code true} if should time out, else {@code false}
1634      * @throws IllegalArgumentException if value is {@code true}
1635      *         and the current keep-alive time is not greater than zero
1636      *
1637      */
1638     void allowCoreThreadTimeOut(bool value) {
1639         if (value && keepAliveTime <= 0)
1640             throw new IllegalArgumentException("Core threads must have nonzero keep alive times");
1641         if (value != _allowCoreThreadTimeOut) {
1642             _allowCoreThreadTimeOut = value;
1643             if (value)
1644                 interruptIdleWorkers();
1645         }
1646     }
1647 
1648     /**
1649      * Sets the maximum allowed number of threads. This overrides any
1650      * value set in the constructor. If the new value is smaller than
1651      * the current value, excess existing threads will be
1652      * terminated when they next become idle.
1653      *
1654      * @param maximumPoolSize the new maximum
1655      * @throws IllegalArgumentException if the new maximum is
1656      *         less than or equal to zero, or
1657      *         less than the {@linkplain #getCorePoolSize core pool size}
1658      * @see #getMaximumPoolSize
1659      */
1660     void setMaximumPoolSize(int maximumPoolSize) {
1661         if (maximumPoolSize <= 0 || maximumPoolSize < corePoolSize)
1662             throw new IllegalArgumentException();
1663         this.maximumPoolSize = maximumPoolSize;
1664         if (workerCountOf(ctl) > maximumPoolSize)
1665             interruptIdleWorkers();
1666     }
1667 
1668     /**
1669      * Returns the maximum allowed number of threads.
1670      *
1671      * @return the maximum allowed number of threads
1672      * @see #setMaximumPoolSize
1673      */
1674     int getMaximumPoolSize() {
1675         return maximumPoolSize;
1676     }
1677 
1678     /**
1679      * Sets the thread keep-alive time, which is the amount of time
1680      * that threads may remain idle before being terminated.
1681      * Threads that wait this amount of time without processing a
1682      * task will be terminated if there are more than the core
1683      * number of threads currently in the pool, or if this pool
1684      * {@linkplain #allowsCoreThreadTimeOut() allows core thread timeout}.
1685      * This overrides any value set in the constructor.
1686      *
     * @param time the time to wait.  A time value of zero will cause
     *        excess threads to terminate immediately after executing tasks.
     * @throws IllegalArgumentException if {@code time} is less than zero, or
     *         if {@code time} is zero and {@code allowsCoreThreadTimeOut}
     *         is enabled
     * @see #getKeepAliveTime()
1693      */
1694     void setKeepAliveTime(Duration time) {
1695         long keepAliveTime = time.total!(TimeUnit.HectoNanosecond)();
1696         if (keepAliveTime < 0)
1697             throw new IllegalArgumentException();
1698         if (keepAliveTime == 0 && allowsCoreThreadTimeOut())
1699             throw new IllegalArgumentException("Core threads must have nonzero keep alive times");
1700         long delta = keepAliveTime - this.keepAliveTime;
1701         this.keepAliveTime = keepAliveTime;
1702         if (delta < 0)
1703             interruptIdleWorkers();
1704     }
1705 
1706     /**
1707      * Returns the thread keep-alive time, which is the amount of time
1708      * that threads may remain idle before being terminated.
1709      * Threads that wait this amount of time without processing a
1710      * task will be terminated if there are more than the core
1711      * number of threads currently in the pool, or if this pool
1712      * {@linkplain #allowsCoreThreadTimeOut() allows core thread timeout}.
1713      *
     * @return the time limit, in the hecto-nanosecond (100 ns) ticks used
     *         internally by this implementation
     * @see #setKeepAliveTime(Duration)
     */
    long getKeepAliveTime() {
        return keepAliveTime;
1721     }
1722 
1723     /* User-level queue utilities */
1724 
1725     /**
1726      * Returns the task queue used by this executor. Access to the
1727      * task queue is intended primarily for debugging and monitoring.
1728      * This queue may be in active use.  Retrieving the task queue
1729      * does not prevent queued tasks from executing.
1730      *
1731      * @return the task queue
1732      */
1733     BlockingQueue!(Runnable) getQueue() {
1734         return workQueue;
1735     }
1736 
1737     /**
1738      * Removes this task from the executor's internal queue if it is
1739      * present, thus causing it not to be run if it has not already
1740      * started.
1741      *
1742      * <p>This method may be useful as one part of a cancellation
1743      * scheme.  It may fail to remove tasks that have been converted
1744      * into other forms before being placed on the internal queue.
1745      * For example, a task entered using {@code submit} might be
1746      * converted into a form that maintains {@code Future} status.
1747      * However, in such cases, method {@link #purge} may be used to
1748      * remove those Futures that have been cancelled.
1749      *
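     * <p>Cancellation sketch, assuming {@code pool} and {@code task} exist:
     * {@code remove} only works for the exact {@code Runnable} that was queued
     * via {@code execute}, while cancelled {@code submit}-style Futures are
     * reclaimed by {@link #purge}:
     *
     * <pre> {@code
     * pool.remove(task);  // only if 'task' itself was queued via execute
     * pool.purge();       // drop queued Futures that were cancelled
     * }</pre>
     *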
1750      * @param task the task to remove
1751      * @return {@code true} if the task was removed
1752      */
1753     bool remove(Runnable task) {
1754         bool removed = workQueue.remove(task);
1755         tryTerminate(); // In case SHUTDOWN and now empty
1756         return removed;
1757     }
1758 
1759     /**
1760      * Tries to remove from the work queue all {@link Future}
1761      * tasks that have been cancelled. This method can be useful as a
1762      * storage reclamation operation, that has no other impact on
1763      * functionality. Cancelled tasks are never executed, but may
1764      * accumulate in work queues until worker threads can actively
1765      * remove them. Invoking this method instead tries to remove them now.
1766      * However, this method may fail to remove tasks in
1767      * the presence of interference by other threads.
1768      */
1769     void purge() {
1770         BlockingQueue!(Runnable) q = workQueue;
1771         try {
1772             foreach(Runnable r; q) {
1773                 Future!Runnable f = cast(Future!Runnable) r;
1774                 if(f !is null && f.isCancelled())
1775                     q.remove(r);
1776             }
1783         } catch (ConcurrentModificationException fallThrough) {
1784             // Take slow path if we encounter interference during traversal.
1785             // Make copy for traversal and call remove for cancelled entries.
1786             // The slow path is more likely to be O(N*N).
1787             foreach (Runnable r ; q.toArray()) {
1788                 Future!Runnable f = cast(Future!Runnable) r;
1789                 if(f !is null && f.isCancelled())
1790                     q.remove(r);
1791             }
1792         }
1793 
1794         tryTerminate(); // In case SHUTDOWN and now empty
1795     }
1796 
1797     /* Statistics */
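
    /*
     * Monitoring sketch: the counters below are approximate and intended for
     * debugging and metrics. A periodic log line might look like the following
     * (assuming {@code pool} exists and tracef is available from
     * hunt.logging.ConsoleLogger):
     *
     *   tracef("pool=%d active=%d queued=%d completed=%d",
     *       pool.getPoolSize(), pool.getActiveCount(),
     *       pool.getQueue().size(), pool.getCompletedTaskCount());
     */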
1798 
1799     /**
1800      * Returns the current number of threads in the pool.
1801      *
1802      * @return the number of threads
1803      */
1804     int getPoolSize() {
1805         Mutex mainLock = this.mainLock;
1806         mainLock.lock();
1807         try {
1808             // Remove rare and surprising possibility of
1809             // isTerminated() && getPoolSize() > 0
1810             return runStateAtLeast(ctl, TIDYING) ? 0 : workers.size();
1811         } finally {
1812             mainLock.unlock();
1813         }
1814     }
1815 
1816     /**
1817      * Returns the approximate number of threads that are actively
1818      * executing tasks.
1819      *
1820      * @return the number of threads
1821      */
1822     int getActiveCount() {
1823         Mutex mainLock = this.mainLock;
1824         mainLock.lock();
1825         try {
1826             int n = 0;
1827             foreach (Worker w ; workers)
1828                 if (w.isLocked()) ++n;
1829             return n;
1830         } finally {
1831             mainLock.unlock();
1832         }
1833     }
1834 
1835     /**
1836      * Returns the largest number of threads that have ever
1837      * simultaneously been in the pool.
1838      *
1839      * @return the number of threads
1840      */
1841     int getLargestPoolSize() {
1842         Mutex mainLock = this.mainLock;
1843         mainLock.lock();
1844         try {
1845             return largestPoolSize;
1846         } finally {
1847             mainLock.unlock();
1848         }
1849     }
1850 
1851     /**
1852      * Returns the approximate total number of tasks that have ever been
1853      * scheduled for execution. Because the states of tasks and
1854      * threads may change dynamically during computation, the returned
1855      * value is only an approximation.
1856      *
1857      * @return the number of tasks
1858      */
1859     long getTaskCount() {
1860         Mutex mainLock = this.mainLock;
1861         mainLock.lock();
1862         try {
1863             long n = completedTaskCount;
1864             foreach (Worker w ; workers) {
1865                 n += w.completedTasks;
1866                 if (w.isLocked())
1867                     ++n;
1868             }
1869             return n + workQueue.size();
1870         } finally {
1871             mainLock.unlock();
1872         }
1873     }
1874 
1875     /**
1876      * Returns the approximate total number of tasks that have
1877      * completed execution. Because the states of tasks and threads
1878      * may change dynamically during computation, the returned value
1879      * is only an approximation, but one that does not ever decrease
1880      * across successive calls.
1881      *
1882      * @return the number of tasks
1883      */
1884     long getCompletedTaskCount() {
1885         Mutex mainLock = this.mainLock;
1886         mainLock.lock();
1887         try {
1888             long n = completedTaskCount;
1889             foreach (Worker w ; workers)
1890                 n += w.completedTasks;
1891             return n;
1892         } finally {
1893             mainLock.unlock();
1894         }
1895     }
1896 
1897     /**
1898      * Returns a string identifying this pool, as well as its state,
1899      * including indications of run state and estimated worker and
1900      * task counts.
1901      *
1902      * @return a string identifying this pool, as well as its state
1903      */
1904     override string toString() {
1905         long ncompleted;
1906         int nworkers, nactive;
1907         Mutex mainLock = this.mainLock;
1908         mainLock.lock();
1909         try {
1910             ncompleted = completedTaskCount;
1911             nactive = 0;
1912             nworkers = workers.size();
1913             foreach (Worker w ; workers) {
1914                 ncompleted += w.completedTasks;
1915                 if (w.isLocked())
1916                     ++nactive;
1917             }
1918         } finally {
1919             mainLock.unlock();
1920         }
1921         int c = ctl;
1922         string runState =
1923             isRunning(c) ? "Running" :
1924             runStateAtLeast(c, TERMINATED) ? "Terminated" :
1925             "Shutting down";
1926         return super.toString() ~
1927             "[" ~ runState ~
1928             ", pool size = " ~ nworkers.to!string() ~
1929             ", active threads = " ~ nactive.to!string() ~
1930             ", queued tasks = " ~ to!string(workQueue.size()) ~
1931             ", completed tasks = " ~ ncompleted.to!string() ~
1932             "]";
1933     }
1934 
1935     /* Extension hooks */
1936 
1937     /**
1938      * Method invoked prior to executing the given Runnable in the
1939      * given thread.  This method is invoked by thread {@code t} that
1940      * will execute task {@code r}, and may be used to re-initialize
1941      * ThreadLocals, or to perform logging.
1942      *
1943      * <p>This implementation does nothing, but may be customized in
1944      * subclasses. Note: To properly nest multiple overridings, subclasses
1945      * should generally invoke {@code super.beforeExecute} at the end of
1946      * this method.
1947      *
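     * <p>A minimal logging override (sketch only; {@code tracef} is assumed
     * from hunt.logging.ConsoleLogger):
     *
     * <pre> {@code
     * override protected void beforeExecute(Thread t, Runnable r) {
     *     tracef("thread %s starting task %s", t.name, (cast(Object)r).toString());
     *     super.beforeExecute(t, r);
     * }
     * }</pre>
     *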
1948      * @param t the thread that will run task {@code r}
1949      * @param r the task that will be executed
1950      */
1951     protected void beforeExecute(Thread t, Runnable r) { }
1952 
1953     /**
1954      * Method invoked upon completion of execution of the given Runnable.
1955      * This method is invoked by the thread that executed the task. If
1956      * non-null, the Throwable is the uncaught {@code RuntimeException}
1957      * or {@code Error} that caused execution to terminate abruptly.
1958      *
1959      * <p>This implementation does nothing, but may be customized in
1960      * subclasses. Note: To properly nest multiple overridings, subclasses
1961      * should generally invoke {@code super.afterExecute} at the
1962      * beginning of this method.
1963      *
1964      * <p><b>Note:</b> When actions are enclosed in tasks (such as
1965      * {@link FutureTask}) either explicitly or via methods such as
1966      * {@code submit}, these task objects catch and maintain
1967      * computational exceptions, and so they do not cause abrupt
1968      * termination, and the internal exceptions are <em>not</em>
1969      * passed to this method. If you would like to trap both kinds of
1970      * failures in this method, you can further probe for such cases,
1971      * as in this sample subclass that prints either the direct cause
1972      * or the underlying exception if a task has been aborted:
1973      *
1974      * <pre> {@code
     * class ExtendedExecutor : ThreadPoolExecutor {
     *   // ...
     *   override protected void afterExecute(Runnable r, Throwable t) {
     *     super.afterExecute(r, t);
     *     Future!Object f = cast(Future!Object) r;
     *     if (t is null && f !is null && f.isDone()) {
     *       try {
     *         Object result = f.get();
     *       } catch (CancellationException ce) {
     *         t = ce;
     *       } catch (ExecutionException ee) {
     *         t = ee.next;
     *       } catch (InterruptedException ie) {
     *         // ignore/reset interruption status
     *       }
     *     }
     *     if (t !is null)
     *       writeln(t);
     *   }
     * }}</pre>
1997      *
1998      * @param r the runnable that has completed
1999      * @param t the exception that caused termination, or null if
2000      * execution completed normally
2001      */
2002     protected void afterExecute(Runnable r, Throwable t) { }
2003 
2004     /**
2005      * Method invoked when the Executor has terminated.  Default
2006      * implementation does nothing. Note: To properly nest multiple
2007      * overridings, subclasses should generally invoke
2008      * {@code super.terminated} within this method.
2009      */
2010     protected void terminated() { }
2011 }
2012 
2013 
2014 /**
2015  * A handler for tasks that cannot be executed by a {@link ThreadPoolExecutor}.
2016  *
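 * <p>A custom handler is simply a class implementing this interface. A
 * log-and-drop variant might look like the following sketch ({@code warningf}
 * is assumed to come from hunt.logging.ConsoleLogger):
 *
 * <pre> {@code
 * class LogAndDropPolicy : RejectedExecutionHandler {
 *     void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
 *         warningf("task rejected, queued=%d", executor.getQueue().size());
 *     }
 * }
 * }</pre>
 *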
2017  * @author Doug Lea
2018  */
2019 interface RejectedExecutionHandler {
2020 
2021     /**
2022      * Method that may be invoked by a {@link ThreadPoolExecutor} when
2023      * {@link ThreadPoolExecutor#execute execute} cannot accept a
2024      * task.  This may occur when no more threads or queue slots are
2025      * available because their bounds would be exceeded, or upon
2026      * shutdown of the Executor.
2027      *
2028      * <p>In the absence of other alternatives, the method may throw
2029      * an unchecked {@link RejectedExecutionException}, which will be
2030      * propagated to the caller of {@code execute}.
2031      *
2032      * @param r the runnable task requested to be executed
2033      * @param executor the executor attempting to execute this task
2034      * @throws RejectedExecutionException if there is no remedy
2035      */
2036     void rejectedExecution(Runnable r, ThreadPoolExecutor executor);
2037 }
2038 
2039 /* Predefined RejectedExecutionHandlers */
2040 
2041 /**
2042  * A handler for rejected tasks that runs the rejected task
2043  * directly in the calling thread of the {@code execute} method,
2044  * unless the executor has been shut down, in which case the task
2045  * is discarded.
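 *
 * <p>This policy provides simple back-pressure: when the pool is saturated,
 * the submitting thread runs the task itself. A construction sketch (the
 * bounded {@code LinkedBlockingQueue} constructor is an assumption about the
 * available queue types):
 *
 * <pre> {@code
 * auto pool = new ThreadPoolExecutor(2, 2, Duration.zero,
 *     new LinkedBlockingQueue!(Runnable)(64),
 *     ThreadFactory.defaultThreadFactory(), new CallerRunsPolicy());
 * }</pre>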
2046  */
2047 class CallerRunsPolicy : RejectedExecutionHandler {
2048     /**
2049      * Creates a {@code CallerRunsPolicy}.
2050      */
2051     this() { }
2052 
2053     /**
2054      * Executes task r in the caller's thread, unless the executor
2055      * has been shut down, in which case the task is discarded.
2056      *
2057      * @param r the runnable task requested to be executed
2058      * @param e the executor attempting to execute this task
2059      */
2060     void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
2061         if (!e.isShutdown()) {
2062             r.run();
2063         }
2064     }
2065 }
2066 
2067 /**
2068  * A handler for rejected tasks that throws a
2069  * {@link RejectedExecutionException}.
2070  *
2071  * This is the default handler for {@link ThreadPoolExecutor} and
2072  * {@link ScheduledThreadPoolExecutor}.
2073  */
2074 class AbortPolicy : RejectedExecutionHandler {
2075     /**
2076      * Creates an {@code AbortPolicy}.
2077      */
2078     this() { }
2079 
2080     /**
2081      * Always throws RejectedExecutionException.
2082      *
2083      * @param r the runnable task requested to be executed
2084      * @param e the executor attempting to execute this task
2085      * @throws RejectedExecutionException always
2086      */
2087     void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
2088         throw new RejectedExecutionException("Task " ~ (cast(Object)r).toString() ~
2089                                              " rejected from " ~
2090                                              e.toString());
2091     }
2092 }
2093 
2094 /**
2095  * A handler for rejected tasks that silently discards the
2096  * rejected task.
2097  */
2098 class DiscardPolicy : RejectedExecutionHandler {
2099     /**
2100      * Creates a {@code DiscardPolicy}.
2101      */
2102     this() { }
2103 
2104     /**
2105      * Does nothing, which has the effect of discarding task r.
2106      *
2107      * @param r the runnable task requested to be executed
2108      * @param e the executor attempting to execute this task
2109      */
2110     void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
2111     }
2112 }
2113 
/**
 * A handler for rejected tasks that discards the oldest unhandled
 * request and then retries {@code execute}, unless the executor
 * is shut down, in which case the task is discarded.
 */
class DiscardOldestPolicy : RejectedExecutionHandler {
    /**
     * Creates a {@code DiscardOldestPolicy}.
     */
    this() { }

    /**
     * Obtains and ignores the next task that the executor
     * would otherwise execute, if one is immediately available,
     * and then retries execution of task r, unless the executor
     * is shut down, in which case task r is instead discarded.
     *
     * @param r the runnable task requested to be executed
     * @param e the executor attempting to execute this task
     */
2134     void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
2135         if (!e.isShutdown()) {
2136             e.getQueue().poll();
2137             e.execute(r);
2138         }
2139     }
2140 }