1 /* 2 * Hunt - A refined core library for D programming language. 3 * 4 * Copyright (C) 2018-2019 HuntLabs 5 * 6 * Website: https://www.huntlabs.net/ 7 * 8 * Licensed under the Apache-2.0 License. 9 * 10 */ 11 12 module hunt.concurrency.ThreadPoolExecutor; 13 14 import hunt.concurrency.AbstractExecutorService; 15 import hunt.concurrency.AbstractOwnableSynchronizer; 16 import hunt.concurrency.AbstractQueuedSynchronizer; 17 import hunt.concurrency.atomic.AtomicHelper; 18 import hunt.concurrency.BlockingQueue; 19 import hunt.concurrency.Exceptions; 20 import hunt.concurrency.ExecutorService; 21 import hunt.concurrency.Executor; 22 import hunt.concurrency.Future; 23 import hunt.concurrency.thread.ThreadEx; 24 import hunt.concurrency.ThreadFactory; 25 26 import hunt.collection; 27 import hunt.Exceptions; 28 import hunt.Functions; 29 import hunt.Integer; 30 import hunt.util.DateTime; 31 import hunt.util.Common; 32 33 import core.sync.mutex; 34 import core.sync.condition; 35 import core.thread; 36 import std.algorithm; 37 import std.conv; 38 39 import hunt.logging.ConsoleLogger; 40 41 // import hunt.collection.ArrayList; 42 // import java.util.ConcurrentModificationException; 43 // import java.util.HashSet; 44 // import java.util.List; 45 // import hunt.concurrency.locks.AbstractQueuedSynchronizer; 46 // import hunt.concurrency.locks.Mutex; 47 48 /** 49 * An {@link ExecutorService} that executes each submitted task using 50 * one of possibly several pooled threads, normally configured 51 * using {@link Executors} factory methods. 52 * 53 * <p>Thread pools address two different problems: they usually 54 * provide improved performance when executing large numbers of 55 * asynchronous tasks, due to reduced per-task invocation overhead, 56 * and they provide a means of bounding and managing the resources, 57 * including threads, consumed when executing a collection of tasks. 58 * Each {@code ThreadPoolExecutor} also maintains some basic 59 * statistics, such as the number of completed tasks. 60 * 61 * <p>To be useful across a wide range of contexts, this class 62 * provides many adjustable parameters and extensibility 63 * hooks. However, programmers are urged to use the more convenient 64 * {@link Executors} factory methods {@link 65 * Executors#newCachedThreadPool} (unbounded thread pool, with 66 * automatic thread reclamation), {@link Executors#newFixedThreadPool} 67 * (fixed size thread pool) and {@link 68 * Executors#newSingleThreadExecutor} (single background thread), that 69 * preconfigure settings for the most common usage 70 * scenarios. Otherwise, use the following guide when manually 71 * configuring and tuning this class: 72 * 73 * <dl> 74 * 75 * <dt>Core and maximum pool sizes</dt> 76 * 77 * <dd>A {@code ThreadPoolExecutor} will automatically adjust the 78 * pool size (see {@link #getPoolSize}) 79 * according to the bounds set by 80 * corePoolSize (see {@link #getCorePoolSize}) and 81 * maximumPoolSize (see {@link #getMaximumPoolSize}). 82 * 83 * When a new task is submitted in method {@link #execute(Runnable)}, 84 * if fewer than corePoolSize threads are running, a new thread is 85 * created to handle the request, even if other worker threads are 86 * idle. Else if fewer than maximumPoolSize threads are running, a 87 * new thread will be created to handle the request only if the queue 88 * is full. By setting corePoolSize and maximumPoolSize the same, you 89 * create a fixed-size thread pool. 
By setting maximumPoolSize to an 90 * essentially unbounded value such as {@code Integer.MAX_VALUE}, you 91 * allow the pool to accommodate an arbitrary number of concurrent 92 * tasks. Most typically, core and maximum pool sizes are set only 93 * upon construction, but they may also be changed dynamically using 94 * {@link #setCorePoolSize} and {@link #setMaximumPoolSize}. </dd> 95 * 96 * <dt>On-demand construction</dt> 97 * 98 * <dd>By default, even core threads are initially created and 99 * started only when new tasks arrive, but this can be overridden 100 * dynamically using method {@link #prestartCoreThread} or {@link 101 * #prestartAllCoreThreads}. You probably want to prestart threads if 102 * you construct the pool with a non-empty queue. </dd> 103 * 104 * <dt>Creating new threads</dt> 105 * 106 * <dd>New threads are created using a {@link ThreadFactory}. If not 107 * otherwise specified, a {@link Executors#defaultThreadFactory} is 108 * used, that creates threads to all be in the same {@link 109 * ThreadGroupEx} and with the same {@code NORM_PRIORITY} priority and 110 * non-daemon status. By supplying a different ThreadFactory, you can 111 * alter the thread's name, thread group, priority, daemon status, 112 * etc. If a {@code ThreadFactory} fails to create a thread when asked 113 * by returning null from {@code newThread}, the executor will 114 * continue, but might not be able to execute any tasks. Threads 115 * should possess the "modifyThread" {@code RuntimePermission}. If 116 * worker threads or other threads using the pool do not possess this 117 * permission, service may be degraded: configuration changes may not 118 * take effect in a timely manner, and a shutdown pool may remain in a 119 * state in which termination is possible but not completed.</dd> 120 * 121 * <dt>Keep-alive times</dt> 122 * 123 * <dd>If the pool currently has more than corePoolSize threads, 124 * excess threads will be terminated if they have been idle for more 125 * than the keepAliveTime (see {@link #getKeepAliveTime(TimeUnit)}). 126 * This provides a means of reducing resource consumption when the 127 * pool is not being actively used. If the pool becomes more active 128 * later, new threads will be constructed. This parameter can also be 129 * changed dynamically using method {@link #setKeepAliveTime(long, 130 * TimeUnit)}. Using a value of {@code Long.MAX_VALUE} {@link 131 * TimeUnit#NANOSECONDS} effectively disables idle threads from ever 132 * terminating prior to shut down. By default, the keep-alive policy 133 * applies only when there are more than corePoolSize threads, but 134 * method {@link #allowCoreThreadTimeOut(bool)} can be used to 135 * apply this time-out policy to core threads as well, so long as the 136 * keepAliveTime value is non-zero. </dd> 137 * 138 * <dt>Queuing</dt> 139 * 140 * <dd>Any {@link BlockingQueue} may be used to transfer and hold 141 * submitted tasks. The use of this queue interacts with pool sizing: 142 * 143 * <ul> 144 * 145 * <li>If fewer than corePoolSize threads are running, the Executor 146 * always prefers adding a new thread 147 * rather than queuing. 148 * 149 * <li>If corePoolSize or more threads are running, the Executor 150 * always prefers queuing a request rather than adding a new 151 * thread. 152 * 153 * <li>If a request cannot be queued, a new thread is created unless 154 * this would exceed maximumPoolSize, in which case, the task will be 155 * rejected. 
156 * 157 * </ul> 158 * 159 * There are three general strategies for queuing: 160 * <ol> 161 * 162 * <li><em> Direct handoffs.</em> A good default choice for a work 163 * queue is a {@link SynchronousQueue} that hands off tasks to threads 164 * without otherwise holding them. Here, an attempt to queue a task 165 * will fail if no threads are immediately available to run it, so a 166 * new thread will be constructed. This policy avoids lockups when 167 * handling sets of requests that might have internal dependencies. 168 * Direct handoffs generally require unbounded maximumPoolSizes to 169 * avoid rejection of new submitted tasks. This in turn admits the 170 * possibility of unbounded thread growth when commands continue to 171 * arrive on average faster than they can be processed. 172 * 173 * <li><em> Unbounded queues.</em> Using an unbounded queue (for 174 * example a {@link LinkedBlockingQueue} without a predefined 175 * capacity) will cause new tasks to wait in the queue when all 176 * corePoolSize threads are busy. Thus, no more than corePoolSize 177 * threads will ever be created. (And the value of the maximumPoolSize 178 * therefore doesn't have any effect.) This may be appropriate when 179 * each task is completely independent of others, so tasks cannot 180 * affect each others execution; for example, in a web page server. 181 * While this style of queuing can be useful in smoothing out 182 * bursts of requests, it admits the possibility of 183 * unbounded work queue growth when commands continue to arrive on 184 * average faster than they can be processed. 185 * 186 * <li><em>Bounded queues.</em> A bounded queue (for example, an 187 * {@link ArrayBlockingQueue}) helps prevent resource exhaustion when 188 * used with finite maximumPoolSizes, but can be more difficult to 189 * tune and control. Queue sizes and maximum pool sizes may be traded 190 * off for each other: Using large queues and small pools minimizes 191 * CPU usage, OS resources, and context-switching overhead, but can 192 * lead to artificially low throughput. If tasks frequently block (for 193 * example if they are I/O bound), a system may be able to schedule 194 * time for more threads than you otherwise allow. Use of small queues 195 * generally requires larger pool sizes, which keeps CPUs busier but 196 * may encounter unacceptable scheduling overhead, which also 197 * decreases throughput. 198 * 199 * </ol> 200 * 201 * </dd> 202 * 203 * <dt>Rejected tasks</dt> 204 * 205 * <dd>New tasks submitted in method {@link #execute(Runnable)} will be 206 * <em>rejected</em> when the Executor has been shut down, and also when 207 * the Executor uses finite bounds for both maximum threads and work queue 208 * capacity, and is saturated. In either case, the {@code execute} method 209 * invokes the {@link 210 * RejectedExecutionHandler#rejectedExecution(Runnable, ThreadPoolExecutor)} 211 * method of its {@link RejectedExecutionHandler}. Four predefined handler 212 * policies are provided: 213 * 214 * <ol> 215 * 216 * <li>In the default {@link ThreadPoolExecutor.AbortPolicy}, the handler 217 * throws a runtime {@link RejectedExecutionException} upon rejection. 218 * 219 * <li>In {@link ThreadPoolExecutor.CallerRunsPolicy}, the thread 220 * that invokes {@code execute} itself runs the task. This provides a 221 * simple feedback control mechanism that will slow down the rate that 222 * new tasks are submitted. 223 * 224 * <li>In {@link ThreadPoolExecutor.DiscardPolicy}, a task that 225 * cannot be executed is simply dropped. 
 *
 * <li>In {@link ThreadPoolExecutor.DiscardOldestPolicy}, if the
 * executor is not shut down, the task at the head of the work queue
 * is dropped, and then execution is retried (which can fail again,
 * causing this to be repeated.)
 *
 * </ol>
 *
 * It is possible to define and use other kinds of {@link
 * RejectedExecutionHandler} classes. Doing so requires some care
 * especially when policies are designed to work only under particular
 * capacity or queuing policies. </dd>
 *
 * <dt>Hook methods</dt>
 *
 * <dd>This class provides {@code protected} overridable
 * {@link #beforeExecute(Thread, Runnable)} and
 * {@link #afterExecute(Runnable, Throwable)} methods that are called
 * before and after execution of each task. These can be used to
 * manipulate the execution environment; for example, reinitializing
 * ThreadLocals, gathering statistics, or adding log entries.
 * Additionally, method {@link #terminated} can be overridden to perform
 * any special processing that needs to be done once the Executor has
 * fully terminated.
 *
 * <p>If hook, callback, or BlockingQueue methods throw exceptions,
 * internal worker threads may in turn fail, abruptly terminate, and
 * possibly be replaced.</dd>
 *
 * <dt>Queue maintenance</dt>
 *
 * <dd>Method {@link #getQueue()} allows access to the work queue
 * for purposes of monitoring and debugging. Use of this method for
 * any other purpose is strongly discouraged. Two supplied methods,
 * {@link #remove(Runnable)} and {@link #purge} are available to
 * assist in storage reclamation when large numbers of queued tasks
 * become cancelled.</dd>
 *
 * <dt>Reclamation</dt>
 *
 * <dd>A pool that is no longer referenced in a program <em>AND</em>
 * has no remaining threads may be reclaimed (garbage collected)
 * without being explicitly shutdown. You can configure a pool to
 * allow all unused threads to eventually die by setting appropriate
 * keep-alive times, using a lower bound of zero core threads and/or
 * setting {@link #allowCoreThreadTimeOut(bool)}. </dd>
 *
 * </dl>
 *
 * <p><b>Extension example</b>. Most extensions of this class
 * override one or more of the protected hook methods. For example,
 * here is a subclass that adds a simple pause/resume feature:
 *
 * <pre> {@code
 * class PausableThreadPoolExecutor : ThreadPoolExecutor {
 *   private bool isPaused;
 *   private Mutex pauseLock;
 *   private Condition unpaused;
 *
 *   this(...) {
 *     super(...);
 *     pauseLock = new Mutex();
 *     unpaused = new Condition(pauseLock);
 *   }
 *
 *   protected override void beforeExecute(Thread t, Runnable r) {
 *     super.beforeExecute(t, r);
 *     pauseLock.lock();
 *     try {
 *       while (isPaused) unpaused.wait();
 *     } catch (InterruptedException ie) {
 *       // interrupted while paused; fall through and run the task
 *     } finally {
 *       pauseLock.unlock();
 *     }
 *   }
 *
 *   void pause() {
 *     pauseLock.lock();
 *     try {
 *       isPaused = true;
 *     } finally {
 *       pauseLock.unlock();
 *     }
 *   }
 *
 *   void resume() {
 *     pauseLock.lock();
 *     try {
 *       isPaused = false;
 *       unpaused.notifyAll();
 *     } finally {
 *       pauseLock.unlock();
 *     }
 *   }
 * }}</pre>
 *
 * @since 1.5
 * @author Doug Lea
 */
class ThreadPoolExecutor : AbstractExecutorService {
    /**
     * The main pool control state, ctl, is an atomic integer packing
     * two conceptual fields
     *   workerCount, indicating the effective number of threads
     *   runState,    indicating whether running, shutting down etc
     *
     * In order to pack them into one int, we limit workerCount to
     * (2^29)-1 (about 500 million) threads rather than (2^31)-1 (2
     * billion) otherwise representable. If this is ever an issue in
     * the future, the variable can be changed to be an AtomicLong,
     * and the shift/mask constants below adjusted. But until the need
     * arises, this code is a bit faster and simpler using an int.
     *
     * The workerCount is the number of workers that have been
     * permitted to start and not permitted to stop. The value may be
     * transiently different from the actual number of live threads,
     * for example when a ThreadFactory fails to create a thread when
     * asked, and when exiting threads are still performing
     * bookkeeping before terminating. The user-visible pool size is
     * reported as the current size of the workers set.
     *
     * The runState provides the main lifecycle control, taking on values:
     *
     *   RUNNING:    Accept new tasks and process queued tasks
     *   SHUTDOWN:   Don't accept new tasks, but process queued tasks
     *   STOP:       Don't accept new tasks, don't process queued tasks,
     *               and interrupt in-progress tasks
     *   TIDYING:    All tasks have terminated, workerCount is zero,
     *               the thread transitioning to state TIDYING
     *               will run the terminated() hook method
     *   TERMINATED: terminated() has completed
     *
     * The numerical order among these values matters, to allow
     * ordered comparisons. The runState monotonically increases over
     * time, but need not hit each state. The transitions are:
     *
     * RUNNING -> SHUTDOWN
     *    On invocation of shutdown()
     * (RUNNING or SHUTDOWN) -> STOP
     *    On invocation of shutdownNow()
     * SHUTDOWN -> TIDYING
     *    When both queue and pool are empty
     * STOP -> TIDYING
     *    When pool is empty
     * TIDYING -> TERMINATED
     *    When the terminated() hook method has completed
     *
     * Threads waiting in awaitTermination() will return when the
     * state reaches TERMINATED.
     *
     * Detecting the transition from SHUTDOWN to TIDYING is less
     * straightforward than you'd like because the queue may become
     * empty after non-empty and vice versa during SHUTDOWN state, but
     * we can only terminate if, after seeing that it is empty, we see
     * that workerCount is 0 (which sometimes entails a recheck -- see
     * below).
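     *
     * As a rough illustration of the packing (using the constants defined
     * just below, where COUNT_BITS works out to 29 for a 32-bit int):
     *
     *   int c = ctlOf(RUNNING, 3);        // run state in the high 3 bits, count in the low 29
     *   assert(workerCountOf(c) == 3);    // COUNT_MASK strips the run-state bits
     *   assert(runStateOf(c) == RUNNING); // ~COUNT_MASK strips the worker count
     *   assert(isRunning(c));             // RUNNING is the only negative state, so c < SHUTDOWN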
379 */ 380 private shared(int) ctl; // = new AtomicInteger(ctlOf(RUNNING, 0)); 381 private enum int COUNT_BITS = Integer.SIZE - 3; 382 private enum int COUNT_MASK = (1 << COUNT_BITS) - 1; 383 384 // runState is stored in the high-order bits 385 private enum int RUNNING = -1 << COUNT_BITS; 386 private enum int SHUTDOWN = 0 << COUNT_BITS; 387 private enum int STOP = 1 << COUNT_BITS; 388 private enum int TIDYING = 2 << COUNT_BITS; 389 private enum int TERMINATED = 3 << COUNT_BITS; 390 391 /** 392 * The queue used for holding tasks and handing off to worker 393 * threads. We do not require that workQueue.poll() returning 394 * null necessarily means that workQueue.isEmpty(), so rely 395 * solely on isEmpty to see if the queue is empty (which we must 396 * do for example when deciding whether to transition from 397 * SHUTDOWN to TIDYING). This accommodates special-purpose 398 * queues such as DelayQueues for which poll() is allowed to 399 * return null even if it may later return non-null when delays 400 * expire. 401 */ 402 private BlockingQueue!(Runnable) workQueue; 403 404 /** 405 * Lock held on access to workers set and related bookkeeping. 406 * While we could use a concurrent set of some sort, it turns out 407 * to be generally preferable to use a lock. Among the reasons is 408 * that this serializes interruptIdleWorkers, which avoids 409 * unnecessary interrupt storms, especially during shutdown. 410 * Otherwise exiting threads would concurrently interrupt those 411 * that have not yet interrupted. It also simplifies some of the 412 * associated statistics bookkeeping of largestPoolSize etc. We 413 * also hold mainLock on shutdown and shutdownNow, for the sake of 414 * ensuring workers set is stable while separately checking 415 * permission to interrupt and actually interrupting. 416 */ 417 private Mutex mainLock; 418 419 /** 420 * Set containing all worker threads in pool. Accessed only when 421 * holding mainLock. 422 */ 423 private HashSet!(Worker) workers; 424 425 /** 426 * Wait condition to support awaitTermination. 427 */ 428 private Condition termination; 429 430 /** 431 * Tracks largest attained pool size. Accessed only under 432 * mainLock. 433 */ 434 private int largestPoolSize; 435 436 /** 437 * Counter for completed tasks. Updated only on termination of 438 * worker threads. Accessed only under mainLock. 439 */ 440 private long completedTaskCount; 441 442 /* 443 * All user control parameters are declared as volatiles so that 444 * ongoing actions are based on freshest values, but without need 445 * for locking, since no internal invariants depend on them 446 * changing synchronously with respect to other actions. 447 */ 448 449 /** 450 * Factory for new threads. All threads are created using this 451 * factory (via method addWorker). All callers must be prepared 452 * for addWorker to fail, which may reflect a system or user's 453 * policy limiting the number of threads. Even though it is not 454 * treated as an error, failure to create threads may result in 455 * new tasks being rejected or existing ones remaining stuck in 456 * the queue. 457 * 458 * We go further and preserve pool invariants even in the face of 459 * errors such as OutOfMemoryError, that might be thrown while 460 * trying to create threads. Such errors are rather common due to 461 * the need to allocate a native stack in Thread.start, and users 462 * will want to perform clean pool shutdown to clean up. 
There 463 * will likely be enough memory available for the cleanup code to 464 * complete without encountering yet another OutOfMemoryError. 465 */ 466 private ThreadFactory threadFactory; 467 468 /** 469 * Handler called when saturated or shutdown in execute. 470 */ 471 private RejectedExecutionHandler handler; 472 473 /** 474 * Timeout in nanoseconds for idle threads waiting for work. 475 * Threads use this timeout when there are more than corePoolSize 476 * present or if allowCoreThreadTimeOut. Otherwise they wait 477 * forever for new work. 478 */ 479 private long keepAliveTime; 480 481 /** 482 * If false (default), core threads stay alive even when idle. 483 * If true, core threads use keepAliveTime to time out waiting 484 * for work. 485 */ 486 private bool _allowCoreThreadTimeOut; 487 488 /** 489 * Core pool size is the minimum number of workers to keep alive 490 * (and not allow to time out etc) unless allowCoreThreadTimeOut 491 * is set, in which case the minimum is zero. 492 * 493 * Since the worker count is actually stored in COUNT_BITS bits, 494 * the effective limit is {@code corePoolSize & COUNT_MASK}. 495 */ 496 private int corePoolSize; 497 498 /** 499 * Maximum pool size. 500 * 501 * Since the worker count is actually stored in COUNT_BITS bits, 502 * the effective limit is {@code maximumPoolSize & COUNT_MASK}. 503 */ 504 private int maximumPoolSize; 505 506 /** 507 * Permission required for callers of shutdown and shutdownNow. 508 * We additionally require (see checkShutdownAccess) that callers 509 * have permission to actually interrupt threads in the worker set 510 * (as governed by Thread.interrupt, which relies on 511 * ThreadGroupEx.checkAccess, which in turn relies on 512 * SecurityManager.checkAccess). Shutdowns are attempted only if 513 * these checks pass. 514 * 515 * All actual invocations of Thread.interrupt (see 516 * interruptIdleWorkers and interruptWorkers) ignore 517 * SecurityExceptions, meaning that the attempted interrupts 518 * silently fail. In the case of shutdown, they should not fail 519 * unless the SecurityManager has inconsistent policies, sometimes 520 * allowing access to a thread and sometimes not. In such cases, 521 * failure to actually interrupt threads may disable or delay full 522 * termination. Other uses of interruptIdleWorkers are advisory, 523 * and failure to actually interrupt will merely delay response to 524 * configuration changes so is not handled exceptionally. 525 */ 526 // private __gshared RuntimePermission shutdownPerm = 527 // new RuntimePermission("modifyThread"); 528 529 530 /** 531 * The default rejected execution handler. 532 */ 533 private __gshared RejectedExecutionHandler defaultHandler; 534 535 536 shared static this() { 537 defaultHandler = new AbortPolicy(); 538 } 539 540 private void initialize() { 541 mainLock = new Mutex(); 542 termination = new Condition(mainLock); 543 ctl = ctlOf(RUNNING, 0); 544 workers = new HashSet!(Worker)(); 545 } 546 547 // Packing and unpacking ctl 548 private static int runStateOf(int c) { return c & ~COUNT_MASK; } 549 private static int workerCountOf(int c) { return c & COUNT_MASK; } 550 private static int ctlOf(int rs, int wc) { return rs | wc; } 551 552 /* 553 * Bit field accessors that don't require unpacking ctl. 554 * These depend on the bit layout and on workerCount being never negative. 
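     *
     * For example, with c = ctlOf(SHUTDOWN, 5): runStateAtLeast(c, SHUTDOWN) and
     * runStateLessThan(c, STOP) are both true, while isRunning(c) is false,
     * because the worker-count bits alone can never push c across a run-state
     * boundary.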
555 */ 556 557 private static bool runStateLessThan(int c, int s) { 558 return c < s; 559 } 560 561 private static bool runStateAtLeast(int c, int s) { 562 return c >= s; 563 } 564 565 private static bool isRunning(int c) { 566 return c < SHUTDOWN; 567 } 568 569 /** 570 * Attempts to CAS-increment the workerCount field of ctl. 571 */ 572 private bool compareAndIncrementWorkerCount(int expect) { 573 return AtomicHelper.compareAndSet(ctl, expect, expect + 1); 574 } 575 576 /** 577 * Attempts to CAS-decrement the workerCount field of ctl. 578 */ 579 private bool compareAndDecrementWorkerCount(int expect) { 580 return AtomicHelper.compareAndSet(ctl, expect, expect - 1); 581 } 582 583 /** 584 * Decrements the workerCount field of ctl. This is called only on 585 * abrupt termination of a thread (see processWorkerExit). Other 586 * decrements are performed within getTask. 587 */ 588 private void decrementWorkerCount() { 589 AtomicHelper.decrement(ctl); 590 } 591 592 /** 593 * Class Worker mainly maintains interrupt control state for 594 * threads running tasks, along with other minor bookkeeping. 595 * This class opportunistically extends AbstractQueuedSynchronizer 596 * to simplify acquiring and releasing a lock surrounding each 597 * task execution. This protects against interrupts that are 598 * intended to wake up a worker thread waiting for a task from 599 * instead interrupting a task being run. We implement a simple 600 * non-reentrant mutual exclusion lock rather than use 601 * Mutex because we do not want worker tasks to be able to 602 * reacquire the lock when they invoke pool control methods like 603 * setCorePoolSize. Additionally, to suppress interrupts until 604 * the thread actually starts running tasks, we initialize lock 605 * state to a negative value, and clear it upon start (in 606 * runWorker). 607 */ 608 private final class Worker : AbstractQueuedSynchronizer, Runnable 609 { 610 /** Thread this worker is running in. Null if factory fails. */ 611 Thread thread; 612 /** Initial task to run. Possibly null. */ 613 Runnable firstTask; 614 /** Per-thread task counter */ 615 long completedTasks; 616 617 // TODO: switch to AbstractQueuedLongSynchronizer and move 618 // completedTasks into the lock word. 619 620 /** 621 * Creates with given first task and thread from ThreadFactory. 622 * @param firstTask the first task (null if none) 623 */ 624 this(Runnable firstTask) { 625 setState(-1); // inhibit interrupts until runWorker 626 this.firstTask = firstTask; 627 this.thread = getThreadFactory().newThread(&run); 628 } 629 630 /** Delegates main run loop to outer runWorker. */ 631 void run() { 632 runWorker(this); 633 } 634 635 // Lock methods 636 // 637 // The value 0 represents the unlocked state. 638 // The value 1 represents the locked state. 
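
        // The value -1 (set via setState(-1) in the constructor) marks a worker
        // whose thread has not started running tasks yet; interruptIfStarted()
        // is meant to leave such workers alone. runWorker() clears it with an
        // initial unlock() (release(1)), which is what finally allows the
        // worker to be interrupted.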
639 640 override protected bool isHeldExclusively() { 641 return getState() != 0; 642 } 643 644 override protected bool tryAcquire(int unused) { 645 if (compareAndSetState(0, 1)) { 646 setExclusiveOwnerThread(Thread.getThis()); 647 return true; 648 } 649 return false; 650 } 651 652 override protected bool tryRelease(int unused) { 653 setExclusiveOwnerThread(null); 654 setState(0); 655 return true; 656 } 657 658 void lock() { acquire(1); } 659 bool tryLock() { return tryAcquire(1); } 660 void unlock() { release(1); } 661 bool isLocked() { return isHeldExclusively(); } 662 663 void interruptIfStarted() { 664 implementationMissing(false); 665 // Thread t; 666 // if (getState() >= 0 && (t = thread) !is null && !t.isInterrupted()) { 667 // try { 668 // t.interrupt(); 669 // } catch (SecurityException ignore) { 670 // } 671 // } 672 } 673 } 674 675 /* 676 * Methods for setting control state 677 */ 678 679 /** 680 * Transitions runState to given target, or leaves it alone if 681 * already at least the given target. 682 * 683 * @param targetState the desired state, either SHUTDOWN or STOP 684 * (but not TIDYING or TERMINATED -- use tryTerminate for that) 685 */ 686 private void advanceRunState(int targetState) { 687 // assert targetState == SHUTDOWN || targetState == STOP; 688 for (;;) { 689 int c = ctl; 690 if (runStateAtLeast(c, targetState) || 691 AtomicHelper.compareAndSet(ctl, c, ctlOf(targetState, workerCountOf(c)))) 692 break; 693 } 694 } 695 696 /** 697 * Transitions to TERMINATED state if either (SHUTDOWN and pool 698 * and queue empty) or (STOP and pool empty). If otherwise 699 * eligible to terminate but workerCount is nonzero, interrupts an 700 * idle worker to ensure that shutdown signals propagate. This 701 * method must be called following any action that might make 702 * termination possible -- reducing worker count or removing tasks 703 * from the queue during shutdown. The method is non-private to 704 * allow access from ScheduledThreadPoolExecutor. 705 */ 706 final void tryTerminate() { 707 for (;;) { 708 int c = ctl; 709 if (isRunning(c) || 710 runStateAtLeast(c, TIDYING) || 711 (runStateLessThan(c, STOP) && ! workQueue.isEmpty())) 712 return; 713 if (workerCountOf(c) != 0) { // Eligible to terminate 714 interruptIdleWorkers(ONLY_ONE); 715 return; 716 } 717 718 Mutex mainLock = this.mainLock; 719 mainLock.lock(); 720 try { 721 if (AtomicHelper.compareAndSet(ctl, c, ctlOf(TIDYING, 0))) { 722 try { 723 terminated(); 724 } finally { 725 ctl = ctlOf(TERMINATED, 0); 726 termination.notifyAll(); 727 } 728 return; 729 } 730 } finally { 731 mainLock.unlock(); 732 } 733 // else retry on failed CAS 734 } 735 } 736 737 /* 738 * Methods for controlling interrupts to worker threads. 739 */ 740 741 /** 742 * If there is a security manager, makes sure caller has 743 * permission to shut down threads in general (see shutdownPerm). 744 * If this passes, additionally makes sure the caller is allowed 745 * to interrupt each worker thread. This might not be true even if 746 * first check passed, if the SecurityManager treats some threads 747 * specially. 
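     *
     * In this D port there is no SecurityManager, so the body below is
     * currently a no-op; the description above is kept only to document
     * why the hook exists.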
748 */ 749 private void checkShutdownAccess() { 750 // FIXME: Needing refactor or cleanup -@zxp at 1/2/2019, 2:12:25 AM 751 // remove this 752 // debug implementationMissing(false); 753 // assert mainLock.isHeldByCurrentThread(); 754 // SecurityManager security = System.getSecurityManager(); 755 // if (security !is null) { 756 // security.checkPermission(shutdownPerm); 757 // for (Worker w : workers) 758 // security.checkAccess(w.thread); 759 // } 760 } 761 762 /** 763 * Interrupts all threads, even if active. Ignores SecurityExceptions 764 * (in which case some threads may remain uninterrupted). 765 */ 766 private void interruptWorkers() { 767 // assert mainLock.isHeldByCurrentThread(); 768 foreach (Worker w ; workers) 769 w.interruptIfStarted(); 770 } 771 772 /** 773 * Interrupts threads that might be waiting for tasks (as 774 * indicated by not being locked) so they can check for 775 * termination or configuration changes. Ignores 776 * SecurityExceptions (in which case some threads may remain 777 * uninterrupted). 778 * 779 * @param onlyOne If true, interrupt at most one worker. This is 780 * called only from tryTerminate when termination is otherwise 781 * enabled but there are still other workers. In this case, at 782 * most one waiting worker is interrupted to propagate shutdown 783 * signals in case all threads are currently waiting. 784 * Interrupting any arbitrary thread ensures that newly arriving 785 * workers since shutdown began will also eventually exit. 786 * To guarantee eventual termination, it suffices to always 787 * interrupt only one idle worker, but shutdown() interrupts all 788 * idle workers so that redundant workers exit promptly, not 789 * waiting for a straggler task to finish. 790 */ 791 private void interruptIdleWorkers(bool onlyOne) { 792 Mutex mainLock = this.mainLock; 793 mainLock.lock(); 794 try { 795 foreach(Worker w ; workers) { 796 Thread t = w.thread; 797 implementationMissing(false); 798 // if (!t.isInterrupted() && w.tryLock()) { 799 // try { 800 // t.interrupt(); 801 // } catch (SecurityException ignore) { 802 // } finally { 803 // w.unlock(); 804 // } 805 // } 806 if (onlyOne) 807 break; 808 } 809 } finally { 810 mainLock.unlock(); 811 } 812 } 813 814 /** 815 * Common form of interruptIdleWorkers, to avoid having to 816 * remember what the bool argument means. 817 */ 818 private void interruptIdleWorkers() { 819 interruptIdleWorkers(false); 820 } 821 822 private enum bool ONLY_ONE = true; 823 824 /* 825 * Misc utilities, most of which are also exported to 826 * ScheduledThreadPoolExecutor 827 */ 828 829 /** 830 * Invokes the rejected execution handler for the given command. 831 * Package-protected for use by ScheduledThreadPoolExecutor. 832 */ 833 final void reject(Runnable command) { 834 handler.rejectedExecution(command, this); 835 } 836 837 /** 838 * Performs any further cleanup following run state transition on 839 * invocation of shutdown. A no-op here, but used by 840 * ScheduledThreadPoolExecutor to cancel delayed tasks. 841 */ 842 void onShutdown() { 843 } 844 845 /** 846 * Drains the task queue into a new list, normally using 847 * drainTo. But if the queue is a DelayQueue or any other kind of 848 * queue for which poll or drainTo may fail to remove some 849 * elements, it deletes them one by one. 
850 */ 851 private List!(Runnable) drainQueue() { 852 BlockingQueue!(Runnable) q = workQueue; 853 ArrayList!(Runnable) taskList = new ArrayList!(Runnable)(); 854 q.drainTo(taskList); 855 if (!q.isEmpty()) { 856 foreach (Runnable r ; q.toArray()) { 857 if (q.remove(r)) 858 taskList.add(r); 859 } 860 } 861 return taskList; 862 } 863 864 /* 865 * Methods for creating, running and cleaning up after workers 866 */ 867 868 /** 869 * Checks if a new worker can be added with respect to current 870 * pool state and the given bound (either core or maximum). If so, 871 * the worker count is adjusted accordingly, and, if possible, a 872 * new worker is created and started, running firstTask as its 873 * first task. This method returns false if the pool is stopped or 874 * eligible to shut down. It also returns false if the thread 875 * factory fails to create a thread when asked. If the thread 876 * creation fails, either due to the thread factory returning 877 * null, or due to an exception (typically OutOfMemoryError in 878 * Thread.start()), we roll back cleanly. 879 * 880 * @param firstTask the task the new thread should run first (or 881 * null if none). Workers are created with an initial first task 882 * (in method execute()) to bypass queuing when there are fewer 883 * than corePoolSize threads (in which case we always start one), 884 * or when the queue is full (in which case we must bypass queue). 885 * Initially idle threads are usually created via 886 * prestartCoreThread or to replace other dying workers. 887 * 888 * @param core if true use corePoolSize as bound, else 889 * maximumPoolSize. (A bool indicator is used here rather than a 890 * value to ensure reads of fresh values after checking other pool 891 * state). 892 * @return true if successful 893 */ 894 private bool addWorker(Runnable firstTask, bool core) { 895 retry: 896 for (int c = ctl;;) { 897 // Check if queue empty only if necessary. 898 if (runStateAtLeast(c, SHUTDOWN) 899 && (runStateAtLeast(c, STOP) 900 || firstTask !is null 901 || workQueue.isEmpty())) 902 return false; 903 904 for (;;) { 905 if (workerCountOf(c) 906 >= ((core ? corePoolSize : maximumPoolSize) & COUNT_MASK)) 907 return false; 908 if (compareAndIncrementWorkerCount(c)) 909 break retry; 910 c = ctl; // Re-read ctl 911 if (runStateAtLeast(c, SHUTDOWN)) 912 continue retry; 913 // else CAS failed due to workerCount change; retry inner loop 914 } 915 } 916 917 bool workerStarted = false; 918 bool workerAdded = false; 919 Worker w = null; 920 try { 921 w = new Worker(firstTask); 922 Thread t = w.thread; 923 if (t !is null) { 924 Mutex mainLock = this.mainLock; 925 mainLock.lock(); 926 try { 927 // Recheck while holding lock. 928 // Back out on ThreadFactory failure or if 929 // shut down before lock acquired. 930 int c = ctl; 931 932 if (isRunning(c) || 933 (runStateLessThan(c, STOP) && firstTask is null)) { 934 // implementationMissing(false); 935 // TODO: Tasks pending completion -@zxp at 10/18/2018, 9:14:13 AM 936 // 937 // if (t.isAlive()) // precheck that t is startable 938 // throw new IllegalThreadStateException(); 939 workers.add(w); 940 int s = workers.size(); 941 if (s > largestPoolSize) 942 largestPoolSize = s; 943 workerAdded = true; 944 } 945 } finally { 946 mainLock.unlock(); 947 } 948 if (workerAdded) { 949 t.start(); 950 workerStarted = true; 951 } 952 } 953 } finally { 954 if (! workerStarted) 955 addWorkerFailed(w); 956 } 957 return workerStarted; 958 } 959 960 /** 961 * Rolls back the worker thread creation. 
962 * - removes worker from workers, if present 963 * - decrements worker count 964 * - rechecks for termination, in case the existence of this 965 * worker was holding up termination 966 */ 967 private void addWorkerFailed(Worker w) { 968 Mutex mainLock = this.mainLock; 969 mainLock.lock(); 970 try { 971 if (w !is null) 972 workers.remove(w); 973 decrementWorkerCount(); 974 tryTerminate(); 975 } finally { 976 mainLock.unlock(); 977 } 978 } 979 980 /** 981 * Performs cleanup and bookkeeping for a dying worker. Called 982 * only from worker threads. Unless completedAbruptly is set, 983 * assumes that workerCount has already been adjusted to account 984 * for exit. This method removes thread from worker set, and 985 * possibly terminates the pool or replaces the worker if either 986 * it exited due to user task exception or if fewer than 987 * corePoolSize workers are running or queue is non-empty but 988 * there are no workers. 989 * 990 * @param w the worker 991 * @param completedAbruptly if the worker died due to user exception 992 */ 993 private void processWorkerExit(Worker w, bool completedAbruptly) { 994 if (completedAbruptly) // If abrupt, then workerCount wasn't adjusted 995 decrementWorkerCount(); 996 997 Mutex mainLock = this.mainLock; 998 mainLock.lock(); 999 try { 1000 completedTaskCount += w.completedTasks; 1001 workers.remove(w); 1002 } finally { 1003 mainLock.unlock(); 1004 } 1005 1006 tryTerminate(); 1007 1008 int c = ctl; 1009 if (runStateLessThan(c, STOP)) { 1010 if (!completedAbruptly) { 1011 int min = _allowCoreThreadTimeOut ? 0 : corePoolSize; 1012 if (min == 0 && ! workQueue.isEmpty()) 1013 min = 1; 1014 if (workerCountOf(c) >= min) 1015 return; // replacement not needed 1016 } 1017 addWorker(null, false); 1018 } 1019 } 1020 1021 /** 1022 * Performs blocking or timed wait for a task, depending on 1023 * current configuration settings, or returns null if this worker 1024 * must exit because of any of: 1025 * 1. There are more than maximumPoolSize workers (due to 1026 * a call to setMaximumPoolSize). 1027 * 2. The pool is stopped. 1028 * 3. The pool is shutdown and the queue is empty. 1029 * 4. This worker timed out waiting for a task, and timed-out 1030 * workers are subject to termination (that is, 1031 * {@code allowCoreThreadTimeOut || workerCount > corePoolSize}) 1032 * both before and after the timed wait, and if the queue is 1033 * non-empty, this worker is not the last thread in the pool. 1034 * 1035 * @return task, or null if the worker must exit, in which case 1036 * workerCount is decremented 1037 */ 1038 private Runnable getTask() { 1039 bool timedOut = false; // Did the last poll() time out? 1040 1041 for (;;) { 1042 int c = ctl; 1043 1044 // Check if queue empty only if necessary. 1045 if (runStateAtLeast(c, SHUTDOWN) 1046 && (runStateAtLeast(c, STOP) || workQueue.isEmpty())) { 1047 decrementWorkerCount(); 1048 return null; 1049 } 1050 1051 int wc = workerCountOf(c); 1052 1053 // Are workers subject to culling? 1054 bool timed = _allowCoreThreadTimeOut || wc > corePoolSize; 1055 1056 if ((wc > maximumPoolSize || (timed && timedOut)) 1057 && (wc > 1 || workQueue.isEmpty())) { 1058 if (compareAndDecrementWorkerCount(c)) 1059 return null; 1060 continue; 1061 } 1062 1063 try { 1064 Runnable r = timed ? 
1065 workQueue.poll(dur!(TimeUnit.HectoNanosecond)(keepAliveTime)) : 1066 workQueue.take(); 1067 if (r !is null) 1068 return r; 1069 timedOut = true; 1070 } catch (InterruptedException retry) { 1071 timedOut = false; 1072 } 1073 } 1074 } 1075 1076 /** 1077 * Main worker run loop. Repeatedly gets tasks from queue and 1078 * executes them, while coping with a number of issues: 1079 * 1080 * 1. We may start out with an initial task, in which case we 1081 * don't need to get the first one. Otherwise, as long as pool is 1082 * running, we get tasks from getTask. If it returns null then the 1083 * worker exits due to changed pool state or configuration 1084 * parameters. Other exits result from exception throws in 1085 * external code, in which case completedAbruptly holds, which 1086 * usually leads processWorkerExit to replace this thread. 1087 * 1088 * 2. Before running any task, the lock is acquired to prevent 1089 * other pool interrupts while the task is executing, and then we 1090 * ensure that unless pool is stopping, this thread does not have 1091 * its interrupt set. 1092 * 1093 * 3. Each task run is preceded by a call to beforeExecute, which 1094 * might throw an exception, in which case we cause thread to die 1095 * (breaking loop with completedAbruptly true) without processing 1096 * the task. 1097 * 1098 * 4. Assuming beforeExecute completes normally, we run the task, 1099 * gathering any of its thrown exceptions to send to afterExecute. 1100 * We separately handle RuntimeException, Error (both of which the 1101 * specs guarantee that we trap) and arbitrary Throwables. 1102 * Because we cannot rethrow Throwables within Runnable.run, we 1103 * wrap them within Errors on the way out (to the thread's 1104 * UncaughtExceptionHandler). Any thrown exception also 1105 * conservatively causes thread to die. 1106 * 1107 * 5. After task.run completes, we call afterExecute, which may 1108 * also throw an exception, which will also cause thread to 1109 * die. According to JLS Sec 14.20, this exception is the one that 1110 * will be in effect even if task.run throws. 1111 * 1112 * The net effect of the exception mechanics is that afterExecute 1113 * and the thread's UncaughtExceptionHandler have as accurate 1114 * information as we can provide about any problems encountered by 1115 * user code. 1116 * 1117 * @param w the worker 1118 */ 1119 final void runWorker(Worker w) { 1120 Thread wt = Thread.getThis(); 1121 Runnable task = w.firstTask; 1122 w.firstTask = null; 1123 w.unlock(); // allow interrupts 1124 bool completedAbruptly = true; 1125 try { 1126 while (task !is null || (task = getTask()) !is null) { 1127 w.lock(); 1128 // If pool is stopping, ensure thread is interrupted; 1129 // if not, ensure thread is not interrupted. 
This 1130 // requires a recheck in second case to deal with 1131 // shutdownNow race while clearing interrupt 1132 1133 // implementationMissing(false); 1134 // if ((runStateAtLeast(ctl, STOP) || 1135 // (Thread.interrupted() && 1136 // runStateAtLeast(ctl, STOP))) && 1137 // !wt.isInterrupted()) 1138 // wt.interrupt(); 1139 try { 1140 beforeExecute(wt, task); 1141 try { 1142 task.run(); 1143 afterExecute(task, null); 1144 } catch (Throwable ex) { 1145 afterExecute(task, ex); 1146 throw ex; 1147 } 1148 } finally { 1149 task = null; 1150 w.completedTasks++; 1151 w.unlock(); 1152 } 1153 } 1154 completedAbruptly = false; 1155 } finally { 1156 processWorkerExit(w, completedAbruptly); 1157 } 1158 } 1159 1160 // constructors and methods 1161 1162 /** 1163 * Creates a new {@code ThreadPoolExecutor} with the given initial 1164 * parameters, the default thread factory and the default rejected 1165 * execution handler. 1166 * 1167 * <p>It may be more convenient to use one of the {@link Executors} 1168 * factory methods instead of this general purpose constructor. 1169 * 1170 * @param corePoolSize the number of threads to keep in the pool, even 1171 * if they are idle, unless {@code allowCoreThreadTimeOut} is set 1172 * @param maximumPoolSize the maximum number of threads to allow in the 1173 * pool 1174 * @param keepAliveTime when the number of threads is greater than 1175 * the core, this is the maximum time that excess idle threads 1176 * will wait for new tasks before terminating. 1177 * @param workQueue the queue to use for holding tasks before they are 1178 * executed. This queue will hold only the {@code Runnable} 1179 * tasks submitted by the {@code execute} method. 1180 * @throws IllegalArgumentException if one of the following holds:<br> 1181 * {@code corePoolSize < 0}<br> 1182 * {@code keepAliveTime < 0}<br> 1183 * {@code maximumPoolSize <= 0}<br> 1184 * {@code maximumPoolSize < corePoolSize} 1185 * @throws NullPointerException if {@code workQueue} is null 1186 */ 1187 this(int corePoolSize, int maximumPoolSize, Duration keepAliveTime, 1188 BlockingQueue!(Runnable) workQueue) { 1189 this(corePoolSize, maximumPoolSize, keepAliveTime, workQueue, 1190 ThreadFactory.defaultThreadFactory(), defaultHandler); 1191 } 1192 1193 /** 1194 * Creates a new {@code ThreadPoolExecutor} with the given initial 1195 * parameters and {@linkplain ThreadPoolExecutor.AbortPolicy 1196 * default rejected execution handler}. 1197 * 1198 * @param corePoolSize the number of threads to keep in the pool, even 1199 * if they are idle, unless {@code allowCoreThreadTimeOut} is set 1200 * @param maximumPoolSize the maximum number of threads to allow in the 1201 * pool 1202 * @param keepAliveTime when the number of threads is greater than 1203 * the core, this is the maximum time that excess idle threads 1204 * will wait for new tasks before terminating. 1205 * @param workQueue the queue to use for holding tasks before they are 1206 * executed. This queue will hold only the {@code Runnable} 1207 * tasks submitted by the {@code execute} method. 
1208 * @param threadFactory the factory to use when the executor 1209 * creates a new thread 1210 * @throws IllegalArgumentException if one of the following holds:<br> 1211 * {@code corePoolSize < 0}<br> 1212 * {@code keepAliveTime < 0}<br> 1213 * {@code maximumPoolSize <= 0}<br> 1214 * {@code maximumPoolSize < corePoolSize} 1215 * @throws NullPointerException if {@code workQueue} 1216 * or {@code threadFactory} is null 1217 */ 1218 this(int corePoolSize, int maximumPoolSize, Duration keepAliveTime, 1219 BlockingQueue!(Runnable) workQueue, ThreadFactory threadFactory) { 1220 this(corePoolSize, maximumPoolSize, keepAliveTime, workQueue, 1221 threadFactory, defaultHandler); 1222 } 1223 1224 /** 1225 * Creates a new {@code ThreadPoolExecutor} with the given initial 1226 * parameters and 1227 * {@linkplain ThreadFactory#defaultThreadFactory default thread factory}. 1228 * 1229 * @param corePoolSize the number of threads to keep in the pool, even 1230 * if they are idle, unless {@code allowCoreThreadTimeOut} is set 1231 * @param maximumPoolSize the maximum number of threads to allow in the 1232 * pool 1233 * @param keepAliveTime when the number of threads is greater than 1234 * the core, this is the maximum time that excess idle threads 1235 * will wait for new tasks before terminating. 1236 * @param workQueue the queue to use for holding tasks before they are 1237 * executed. This queue will hold only the {@code Runnable} 1238 * tasks submitted by the {@code execute} method. 1239 * @param handler the handler to use when execution is blocked 1240 * because the thread bounds and queue capacities are reached 1241 * @throws IllegalArgumentException if one of the following holds:<br> 1242 * {@code corePoolSize < 0}<br> 1243 * {@code keepAliveTime < 0}<br> 1244 * {@code maximumPoolSize <= 0}<br> 1245 * {@code maximumPoolSize < corePoolSize} 1246 * @throws NullPointerException if {@code workQueue} 1247 * or {@code handler} is null 1248 */ 1249 this(int corePoolSize, int maximumPoolSize, Duration keepAliveTime, 1250 BlockingQueue!(Runnable) workQueue, RejectedExecutionHandler handler) { 1251 this(corePoolSize, maximumPoolSize, keepAliveTime, workQueue, 1252 ThreadFactory.defaultThreadFactory(), handler); 1253 } 1254 1255 /** 1256 * Creates a new {@code ThreadPoolExecutor} with the given initial 1257 * parameters. 1258 * 1259 * @param corePoolSize the number of threads to keep in the pool, even 1260 * if they are idle, unless {@code allowCoreThreadTimeOut} is set 1261 * @param maximumPoolSize the maximum number of threads to allow in the 1262 * pool 1263 * @param keepAliveTime when the number of threads is greater than 1264 * the core, this is the maximum time that excess idle threads 1265 * will wait for new tasks before terminating. 1266 * @param workQueue the queue to use for holding tasks before they are 1267 * executed. This queue will hold only the {@code Runnable} 1268 * tasks submitted by the {@code execute} method. 
1269 * @param threadFactory the factory to use when the executor 1270 * creates a new thread 1271 * @param handler the handler to use when execution is blocked 1272 * because the thread bounds and queue capacities are reached 1273 * @throws IllegalArgumentException if one of the following holds:<br> 1274 * {@code corePoolSize < 0}<br> 1275 * {@code keepAliveTime < 0}<br> 1276 * {@code maximumPoolSize <= 0}<br> 1277 * {@code maximumPoolSize < corePoolSize} 1278 * @throws NullPointerException if {@code workQueue} 1279 * or {@code threadFactory} or {@code handler} is null 1280 */ 1281 this(int corePoolSize, int maximumPoolSize, Duration keepAliveTime, 1282 BlockingQueue!(Runnable) workQueue, 1283 ThreadFactory threadFactory, RejectedExecutionHandler handler) { 1284 1285 initialize(); 1286 this.keepAliveTime = keepAliveTime.total!(TimeUnit.HectoNanosecond)(); 1287 if (corePoolSize < 0 || maximumPoolSize <= 0 || 1288 maximumPoolSize < corePoolSize || this.keepAliveTime < 0) 1289 throw new IllegalArgumentException(); 1290 1291 if (workQueue is null || threadFactory is null || handler is null) 1292 throw new NullPointerException(); 1293 1294 this.corePoolSize = corePoolSize; 1295 this.maximumPoolSize = maximumPoolSize; 1296 this.workQueue = workQueue; 1297 this.threadFactory = threadFactory; 1298 this.handler = handler; 1299 } 1300 1301 /** 1302 * Executes the given task sometime in the future. The task 1303 * may execute in a new thread or in an existing pooled thread. 1304 * 1305 * If the task cannot be submitted for execution, either because this 1306 * executor has been shutdown or because its capacity has been reached, 1307 * the task is handled by the current {@link RejectedExecutionHandler}. 1308 * 1309 * @param command the task to execute 1310 * @throws RejectedExecutionException at discretion of 1311 * {@code RejectedExecutionHandler}, if the task 1312 * cannot be accepted for execution 1313 * @throws NullPointerException if {@code command} is null 1314 */ 1315 void execute(Runnable command) { 1316 if (command is null) 1317 throw new NullPointerException(); 1318 /* 1319 * Proceed in 3 steps: 1320 * 1321 * 1. If fewer than corePoolSize threads are running, try to 1322 * start a new thread with the given command as its first 1323 * task. The call to addWorker atomically checks runState and 1324 * workerCount, and so prevents false alarms that would add 1325 * threads when it shouldn't, by returning false. 1326 * 1327 * 2. If a task can be successfully queued, then we still need 1328 * to double-check whether we should have added a thread 1329 * (because existing ones died since last checking) or that 1330 * the pool shut down since entry into this method. So we 1331 * recheck state and if necessary roll back the enqueuing if 1332 * stopped, or start a new thread if there are none. 1333 * 1334 * 3. If we cannot queue task, then we try to add a new 1335 * thread. If it fails, we know we are shut down or saturated 1336 * and so reject the task. 1337 */ 1338 int c = ctl; 1339 if (workerCountOf(c) < corePoolSize) { 1340 if (addWorker(command, true)) 1341 return; 1342 c = ctl; 1343 } 1344 if (isRunning(c) && workQueue.offer(command)) { 1345 int recheck = ctl; 1346 if (! 
isRunning(recheck) && remove(command)) 1347 reject(command); 1348 else if (workerCountOf(recheck) == 0) 1349 addWorker(null, false); 1350 } 1351 else if (!addWorker(command, false)) 1352 reject(command); 1353 } 1354 1355 /** 1356 * Initiates an orderly shutdown in which previously submitted 1357 * tasks are executed, but no new tasks will be accepted. 1358 * Invocation has no additional effect if already shut down. 1359 * 1360 * <p>This method does not wait for previously submitted tasks to 1361 * complete execution. Use {@link #awaitTermination awaitTermination} 1362 * to do that. 1363 * 1364 * @throws SecurityException {@inheritDoc} 1365 */ 1366 void shutdown() { 1367 Mutex mainLock = this.mainLock; 1368 mainLock.lock(); 1369 try { 1370 checkShutdownAccess(); 1371 advanceRunState(SHUTDOWN); 1372 interruptIdleWorkers(); 1373 onShutdown(); // hook for ScheduledThreadPoolExecutor 1374 } finally { 1375 mainLock.unlock(); 1376 } 1377 tryTerminate(); 1378 } 1379 1380 /** 1381 * Attempts to stop all actively executing tasks, halts the 1382 * processing of waiting tasks, and returns a list of the tasks 1383 * that were awaiting execution. These tasks are drained (removed) 1384 * from the task queue upon return from this method. 1385 * 1386 * <p>This method does not wait for actively executing tasks to 1387 * terminate. Use {@link #awaitTermination awaitTermination} to 1388 * do that. 1389 * 1390 * <p>There are no guarantees beyond best-effort attempts to stop 1391 * processing actively executing tasks. This implementation 1392 * interrupts tasks via {@link Thread#interrupt}; any task that 1393 * fails to respond to interrupts may never terminate. 1394 * 1395 * @throws SecurityException {@inheritDoc} 1396 */ 1397 List!(Runnable) shutdownNow() { 1398 List!(Runnable) tasks; 1399 Mutex mainLock = this.mainLock; 1400 mainLock.lock(); 1401 try { 1402 checkShutdownAccess(); 1403 advanceRunState(STOP); 1404 interruptWorkers(); 1405 tasks = drainQueue(); 1406 } finally { 1407 mainLock.unlock(); 1408 } 1409 tryTerminate(); 1410 return tasks; 1411 } 1412 1413 bool isShutdown() { 1414 return runStateAtLeast(ctl, SHUTDOWN); 1415 } 1416 1417 /** Used by ScheduledThreadPoolExecutor. */ 1418 bool isStopped() { 1419 return runStateAtLeast(ctl, STOP); 1420 } 1421 1422 /** 1423 * Returns true if this executor is in the process of terminating 1424 * after {@link #shutdown} or {@link #shutdownNow} but has not 1425 * completely terminated. This method may be useful for 1426 * debugging. A return of {@code true} reported a sufficient 1427 * period after shutdown may indicate that submitted tasks have 1428 * ignored or suppressed interruption, causing this executor not 1429 * to properly terminate. 
     *
     * @return {@code true} if terminating but not yet terminated
     */
    bool isTerminating() {
        int c = ctl;
        return runStateAtLeast(c, SHUTDOWN) && runStateLessThan(c, TERMINATED);
    }

    bool isTerminated() {
        return runStateAtLeast(ctl, TERMINATED);
    }

    bool awaitTermination(Duration timeout) {
        Mutex mainLock = this.mainLock;
        mainLock.lock();
        try {
            while (runStateLessThan(ctl, TERMINATED)) {
                // Condition.wait(Duration) returns false once the timeout elapses
                // without a notification; give up then, after one last state check.
                // Note that the timeout applies per wait, not cumulatively across
                // spurious wake-ups.
                if (!termination.wait(timeout))
                    return runStateAtLeast(ctl, TERMINATED);
            }
            return true;
        } finally {
            mainLock.unlock();
        }
    }

    // Override without "throws Throwable" for compatibility with subclasses
    // whose finalize method invokes super.finalize() (as is recommended).
    // Before JDK 11, finalize() had a non-empty method body.

    /**
     * @implNote Previous versions of this class had a finalize method
     * that shut down this executor, but in this version, finalize
     * does nothing.
     */
    //@Deprecated(since="9")
    protected void finalize() {}

    /**
     * Sets the thread factory used to create new threads.
     *
     * @param threadFactory the new thread factory
     * @throws NullPointerException if threadFactory is null
     * @see #getThreadFactory
     */
    void setThreadFactory(ThreadFactory threadFactory) {
        if (threadFactory is null)
            throw new NullPointerException();
        this.threadFactory = threadFactory;
    }

    /**
     * Returns the thread factory used to create new threads.
     *
     * @return the current thread factory
     * @see #setThreadFactory(ThreadFactory)
     */
    ThreadFactory getThreadFactory() {
        return threadFactory;
    }

    /**
     * Sets a new handler for unexecutable tasks.
     *
     * @param handler the new handler
     * @throws NullPointerException if handler is null
     * @see #getRejectedExecutionHandler
     */
    void setRejectedExecutionHandler(RejectedExecutionHandler handler) {
        if (handler is null)
            throw new NullPointerException();
        this.handler = handler;
    }

    /**
     * Returns the current handler for unexecutable tasks.
     *
     * @return the current handler
     * @see #setRejectedExecutionHandler(RejectedExecutionHandler)
     */
    RejectedExecutionHandler getRejectedExecutionHandler() {
        return handler;
    }

    /**
     * Sets the core number of threads. This overrides any value set
     * in the constructor. If the new value is smaller than the
     * current value, excess existing threads will be terminated when
     * they next become idle. If larger, new threads will, if needed,
     * be started to execute any queued tasks.
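     *
     * <pre> {@code
     * // illustrative only: `pool` stands for an existing ThreadPoolExecutor.
     * // Growing the core size prestarts workers (up to the new core size)
     * // while the queue is non-empty.
     * pool.setCorePoolSize(pool.getCorePoolSize() + 2);
     * }</pre>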
     *
     * @param corePoolSize the new core size
     * @throws IllegalArgumentException if {@code corePoolSize < 0}
     *         or {@code corePoolSize} is greater than the {@linkplain
     *         #getMaximumPoolSize() maximum pool size}
     * @see #getCorePoolSize
     */
    void setCorePoolSize(int corePoolSize) {
        if (corePoolSize < 0 || maximumPoolSize < corePoolSize)
            throw new IllegalArgumentException();
        int delta = corePoolSize - this.corePoolSize;
        this.corePoolSize = corePoolSize;
        if (workerCountOf(ctl) > corePoolSize)
            interruptIdleWorkers();
        else if (delta > 0) {
            // We don't really know how many new threads are "needed".
            // As a heuristic, prestart enough new workers (up to new
            // core size) to handle the current number of tasks in
            // queue, but stop if queue becomes empty while doing so.
            int k = min(delta, workQueue.size());
            while (k-- > 0 && addWorker(null, true)) {
                if (workQueue.isEmpty())
                    break;
            }
        }
    }

    /**
     * Returns the core number of threads.
     *
     * @return the core number of threads
     * @see #setCorePoolSize
     */
    int getCorePoolSize() {
        return corePoolSize;
    }

    /**
     * Starts a core thread, causing it to idly wait for work. This
     * overrides the default policy of starting core threads only when
     * new tasks are executed. This method will return {@code false}
     * if all core threads have already been started.
     *
     * @return {@code true} if a thread was started
     */
    bool prestartCoreThread() {
        return workerCountOf(ctl) < corePoolSize &&
            addWorker(null, true);
    }

    /**
     * Same as prestartCoreThread except arranges that at least one
     * thread is started even if corePoolSize is 0.
     */
    void ensurePrestart() {
        int wc = workerCountOf(ctl);
        if (wc < corePoolSize)
            addWorker(null, true);
        else if (wc == 0)
            addWorker(null, false);
    }

    /**
     * Starts all core threads, causing them to idly wait for work. This
     * overrides the default policy of starting core threads only when
     * new tasks are executed.
     *
     * @return the number of threads started
     */
    int prestartAllCoreThreads() {
        int n = 0;
        while (addWorker(null, true))
            ++n;
        return n;
    }

    /**
     * Returns true if this pool allows core threads to time out and
     * terminate if no tasks arrive within the keepAlive time, being
     * replaced if needed when new tasks arrive. When true, the same
     * keep-alive policy applying to non-core threads applies also to
     * core threads. When false (the default), core threads are never
     * terminated due to lack of incoming tasks.
     *
     * @return {@code true} if core threads are allowed to time out,
     *         else {@code false}
     *
     * @since 1.6
     */
    bool allowsCoreThreadTimeOut() {
        return _allowCoreThreadTimeOut;
    }

    /**
     * Sets the policy governing whether core threads may time out and
     * terminate if no tasks arrive within the keep-alive time, being
     * replaced if needed when new tasks arrive. When false, core
     * threads are never terminated due to lack of incoming
     * tasks. When true, the same keep-alive policy applying to
     * non-core threads applies also to core threads. To avoid
     * continual thread replacement, the keep-alive time must be
     * greater than zero when setting {@code true}. This method
     * should in general be called before the pool is actively used.
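     *
     * <p>A hedged configuration sketch (assumes the pool's keep-alive time
     * is, or is first set to, a non-zero value):
     *
     * <pre> {@code
     * pool.setKeepAliveTime(30.seconds);  // must be non-zero before enabling
     * pool.allowCoreThreadTimeOut(true);  // core threads may now expire when idle
     * }</pre>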
     *
     * @param value {@code true} if should time out, else {@code false}
     * @throws IllegalArgumentException if value is {@code true}
     *         and the current keep-alive time is not greater than zero
     *
     * @since 1.6
     */
    void allowCoreThreadTimeOut(bool value) {
        if (value && keepAliveTime <= 0)
            throw new IllegalArgumentException("Core threads must have nonzero keep alive times");
        if (value != _allowCoreThreadTimeOut) {
            _allowCoreThreadTimeOut = value;
            if (value)
                interruptIdleWorkers();
        }
    }

    /**
     * Sets the maximum allowed number of threads. This overrides any
     * value set in the constructor. If the new value is smaller than
     * the current value, excess existing threads will be
     * terminated when they next become idle.
     *
     * @param maximumPoolSize the new maximum
     * @throws IllegalArgumentException if the new maximum is
     *         less than or equal to zero, or
     *         less than the {@linkplain #getCorePoolSize core pool size}
     * @see #getMaximumPoolSize
     */
    void setMaximumPoolSize(int maximumPoolSize) {
        if (maximumPoolSize <= 0 || maximumPoolSize < corePoolSize)
            throw new IllegalArgumentException();
        this.maximumPoolSize = maximumPoolSize;
        if (workerCountOf(ctl) > maximumPoolSize)
            interruptIdleWorkers();
    }

    /**
     * Returns the maximum allowed number of threads.
     *
     * @return the maximum allowed number of threads
     * @see #setMaximumPoolSize
     */
    int getMaximumPoolSize() {
        return maximumPoolSize;
    }

    /**
     * Sets the thread keep-alive time, which is the amount of time
     * that threads may remain idle before being terminated.
     * Threads that wait this amount of time without processing a
     * task will be terminated if there are more than the core
     * number of threads currently in the pool, or if this pool
     * {@linkplain #allowsCoreThreadTimeOut() allows core thread timeout}.
     * This overrides any value set in the constructor.
     *
     * @param time the time to wait. A time value of zero will cause
     *        excess threads to terminate immediately after executing tasks.
     * @throws IllegalArgumentException if {@code time} is less than zero or
     *         if {@code time} is zero and {@code allowsCoreThreadTimeOut}
     * @see #getKeepAliveTime
     */
    void setKeepAliveTime(Duration time) {
        long keepAliveTime = time.total!(TimeUnit.HectoNanosecond)();
        if (keepAliveTime < 0)
            throw new IllegalArgumentException();
        if (keepAliveTime == 0 && allowsCoreThreadTimeOut())
            throw new IllegalArgumentException("Core threads must have nonzero keep alive times");
        long delta = keepAliveTime - this.keepAliveTime;
        this.keepAliveTime = keepAliveTime;
        if (delta < 0)
            interruptIdleWorkers();
    }

    /**
     * Returns the thread keep-alive time, which is the amount of time
     * that threads may remain idle before being terminated.
     * Threads that wait this amount of time without processing a
     * task will be terminated if there are more than the core
     * number of threads currently in the pool, or if this pool
     * {@linkplain #allowsCoreThreadTimeOut() allows core thread timeout}.
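     *
     * <p>Note that the value is returned in the same resolution used by
     * {@link #setKeepAliveTime(Duration)} to store it (hecto-nanosecond
     * ticks), not as a {@code Duration}. An illustrative sketch:
     *
     * <pre> {@code
     * pool.setKeepAliveTime(1.seconds);
     * long ticks = pool.getKeepAliveTime();  // raw stored value, not a Duration
     * }</pre>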
     *
     * @return the time limit
     * @see #setKeepAliveTime(Duration)
     */
    long getKeepAliveTime() {
        // return unit.convert(keepAliveTime, TimeUnit.NANOSECONDS);
        return keepAliveTime;
    }

    /* User-level queue utilities */

    /**
     * Returns the task queue used by this executor. Access to the
     * task queue is intended primarily for debugging and monitoring.
     * This queue may be in active use. Retrieving the task queue
     * does not prevent queued tasks from executing.
     *
     * @return the task queue
     */
    BlockingQueue!(Runnable) getQueue() {
        return workQueue;
    }

    /**
     * Removes this task from the executor's internal queue if it is
     * present, thus causing it not to be run if it has not already
     * started.
     *
     * <p>This method may be useful as one part of a cancellation
     * scheme. It may fail to remove tasks that have been converted
     * into other forms before being placed on the internal queue.
     * For example, a task entered using {@code submit} might be
     * converted into a form that maintains {@code Future} status.
     * However, in such cases, method {@link #purge} may be used to
     * remove those Futures that have been cancelled.
     *
     * @param task the task to remove
     * @return {@code true} if the task was removed
     */
    bool remove(Runnable task) {
        bool removed = workQueue.remove(task);
        tryTerminate(); // In case SHUTDOWN and now empty
        return removed;
    }

    /**
     * Tries to remove from the work queue all {@link Future}
     * tasks that have been cancelled. This method can be useful as a
     * storage reclamation operation, that has no other impact on
     * functionality. Cancelled tasks are never executed, but may
     * accumulate in work queues until worker threads can actively
     * remove them. Invoking this method instead tries to remove them now.
     * However, this method may fail to remove tasks in
     * the presence of interference by other threads.
     */
    void purge() {
        BlockingQueue!(Runnable) q = workQueue;
        try {
            foreach (Runnable r; q) {
                Future!Runnable f = cast(Future!Runnable) r;
                if (f !is null && f.isCancelled())
                    q.remove(r);
            }
        } catch (ConcurrentModificationException fallThrough) {
            // Take slow path if we encounter interference during traversal.
            // Make copy for traversal and call remove for cancelled entries.
            // The slow path is more likely to be O(N*N).
            foreach (Runnable r; q.toArray()) {
                Future!Runnable f = cast(Future!Runnable) r;
                if (f !is null && f.isCancelled())
                    q.remove(r);
            }
        }

        tryTerminate(); // In case SHUTDOWN and now empty
    }

    /* Statistics */

    /**
     * Returns the current number of threads in the pool.
     *
     * @return the number of threads
     */
    int getPoolSize() {
        Mutex mainLock = this.mainLock;
        mainLock.lock();
        try {
            // Remove rare and surprising possibility of
            // isTerminated() && getPoolSize() > 0
            return runStateAtLeast(ctl, TIDYING) ? 0 : workers.size();
        } finally {
            mainLock.unlock();
        }
    }
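
    // Illustrative sketch (not part of the API): the statistics below are
    // typically sampled together for monitoring, e.g.
    //
    //     trace("pool=", pool.getPoolSize(),
    //           ", active=", pool.getActiveCount(),
    //           ", queued=", pool.getQueue().size(),
    //           ", completed=", pool.getCompletedTaskCount());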
    /**
     * Returns the approximate number of threads that are actively
     * executing tasks.
     *
     * @return the number of threads
     */
    int getActiveCount() {
        Mutex mainLock = this.mainLock;
        mainLock.lock();
        try {
            int n = 0;
            foreach (Worker w; workers)
                if (w.isLocked()) ++n;
            return n;
        } finally {
            mainLock.unlock();
        }
    }

    /**
     * Returns the largest number of threads that have ever
     * simultaneously been in the pool.
     *
     * @return the number of threads
     */
    int getLargestPoolSize() {
        Mutex mainLock = this.mainLock;
        mainLock.lock();
        try {
            return largestPoolSize;
        } finally {
            mainLock.unlock();
        }
    }

    /**
     * Returns the approximate total number of tasks that have ever been
     * scheduled for execution. Because the states of tasks and
     * threads may change dynamically during computation, the returned
     * value is only an approximation.
     *
     * @return the number of tasks
     */
    long getTaskCount() {
        Mutex mainLock = this.mainLock;
        mainLock.lock();
        try {
            long n = completedTaskCount;
            foreach (Worker w; workers) {
                n += w.completedTasks;
                if (w.isLocked())
                    ++n;
            }
            return n + workQueue.size();
        } finally {
            mainLock.unlock();
        }
    }

    /**
     * Returns the approximate total number of tasks that have
     * completed execution. Because the states of tasks and threads
     * may change dynamically during computation, the returned value
     * is only an approximation, but one that does not ever decrease
     * across successive calls.
     *
     * @return the number of tasks
     */
    long getCompletedTaskCount() {
        Mutex mainLock = this.mainLock;
        mainLock.lock();
        try {
            long n = completedTaskCount;
            foreach (Worker w; workers)
                n += w.completedTasks;
            return n;
        } finally {
            mainLock.unlock();
        }
    }

    /**
     * Returns a string identifying this pool, as well as its state,
     * including indications of run state and estimated worker and
     * task counts.
     *
     * @return a string identifying this pool, as well as its state
     */
    override string toString() {
        long ncompleted;
        int nworkers, nactive;
        Mutex mainLock = this.mainLock;
        mainLock.lock();
        try {
            ncompleted = completedTaskCount;
            nactive = 0;
            nworkers = workers.size();
            foreach (Worker w; workers) {
                ncompleted += w.completedTasks;
                if (w.isLocked())
                    ++nactive;
            }
        } finally {
            mainLock.unlock();
        }
        int c = ctl;
        string runState =
            isRunning(c) ? "Running" :
            runStateAtLeast(c, TERMINATED) ? "Terminated" :
            "Shutting down";
        return super.toString() ~
            "[" ~ runState ~
            ", pool size = " ~ nworkers.to!string() ~
            ", active threads = " ~ nactive.to!string() ~
            ", queued tasks = " ~ to!string(workQueue.size()) ~
            ", completed tasks = " ~ ncompleted.to!string() ~
            "]";
    }

    /* Extension hooks */

    /**
     * Method invoked prior to executing the given Runnable in the
     * given thread. This method is invoked by thread {@code t} that
     * will execute task {@code r}, and may be used to re-initialize
     * ThreadLocals, or to perform logging.
     *
     * <p>This implementation does nothing, but may be customized in
     * subclasses. Note: To properly nest multiple overridings, subclasses
     * should generally invoke {@code super.beforeExecute} at the end of
     * this method.
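     *
     * <p>A minimal override sketch (illustrative only; the subclass name is
     * hypothetical):
     *
     * <pre> {@code
     * class LoggingExecutor : ThreadPoolExecutor {
     *     // ...
     *     override protected void beforeExecute(Thread t, Runnable r) {
     *         trace("about to run a task on thread ", t.name);
     *         super.beforeExecute(t, r);  // keep the nesting convention
     *     }
     * }}</pre>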
     *
     * @param t the thread that will run task {@code r}
     * @param r the task that will be executed
     */
    protected void beforeExecute(Thread t, Runnable r) { }

    /**
     * Method invoked upon completion of execution of the given Runnable.
     * This method is invoked by the thread that executed the task. If
     * non-null, the Throwable is the uncaught {@code RuntimeException}
     * or {@code Error} that caused execution to terminate abruptly.
     *
     * <p>This implementation does nothing, but may be customized in
     * subclasses. Note: To properly nest multiple overridings, subclasses
     * should generally invoke {@code super.afterExecute} at the
     * beginning of this method.
     *
     * <p><b>Note:</b> When actions are enclosed in tasks (such as
     * {@link FutureTask}) either explicitly or via methods such as
     * {@code submit}, these task objects catch and maintain
     * computational exceptions, and so they do not cause abrupt
     * termination, and the internal exceptions are <em>not</em>
     * passed to this method. If you would like to trap both kinds of
     * failures in this method, you can further probe for such cases,
     * as in this sample subclass that prints either the direct cause
     * or the underlying exception if a task has been aborted:
     *
     * <pre> {@code
     * class ExtendedExecutor : ThreadPoolExecutor {
     *     // ...
     *     override protected void afterExecute(Runnable r, Throwable t) {
     *         super.afterExecute(r, t);
     *         Future!Object f = cast(Future!Object) r;
     *         if (t is null && f !is null && f.isDone()) {
     *             try {
     *                 Object result = f.get();
     *             } catch (CancellationException ce) {
     *                 t = ce;
     *             } catch (ExecutionException ee) {
     *                 t = ee.next;  // the underlying cause
     *             } catch (InterruptedException ie) {
     *                 // ignore/reset
     *             }
     *         }
     *         if (t !is null)
     *             warning(t.msg);
     *     }
     * }}</pre>
     *
     * @param r the runnable that has completed
     * @param t the exception that caused termination, or null if
     *        execution completed normally
     */
    protected void afterExecute(Runnable r, Throwable t) { }

    /**
     * Method invoked when the Executor has terminated. Default
     * implementation does nothing. Note: To properly nest multiple
     * overridings, subclasses should generally invoke
     * {@code super.terminated} within this method.
     */
    protected void terminated() { }
}


/**
 * A handler for tasks that cannot be executed by a {@link ThreadPoolExecutor}.
 *
 * @since 1.5
 * @author Doug Lea
 */
interface RejectedExecutionHandler {

    /**
     * Method that may be invoked by a {@link ThreadPoolExecutor} when
     * {@link ThreadPoolExecutor#execute execute} cannot accept a
     * task. This may occur when no more threads or queue slots are
     * available because their bounds would be exceeded, or upon
     * shutdown of the Executor.
     *
     * <p>In the absence of other alternatives, the method may throw
     * an unchecked {@link RejectedExecutionException}, which will be
     * propagated to the caller of {@code execute}.
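     *
     * <p>A hedged implementation sketch (the class name and the logging call
     * are illustrative, not part of this library):
     *
     * <pre> {@code
     * class LoggingDiscardPolicy : RejectedExecutionHandler {
     *     void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
     *         warning("task rejected; queued tasks = ", executor.getQueue().size());
     *         // the task is silently dropped, mirroring DiscardPolicy
     *     }
     * }}</pre>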
     *
     * @param r the runnable task requested to be executed
     * @param executor the executor attempting to execute this task
     * @throws RejectedExecutionException if there is no remedy
     */
    void rejectedExecution(Runnable r, ThreadPoolExecutor executor);
}

/* Predefined RejectedExecutionHandlers */

/**
 * A handler for rejected tasks that runs the rejected task
 * directly in the calling thread of the {@code execute} method,
 * unless the executor has been shut down, in which case the task
 * is discarded.
 */
class CallerRunsPolicy : RejectedExecutionHandler {
    /**
     * Creates a {@code CallerRunsPolicy}.
     */
    this() { }

    /**
     * Executes task r in the caller's thread, unless the executor
     * has been shut down, in which case the task is discarded.
     *
     * @param r the runnable task requested to be executed
     * @param e the executor attempting to execute this task
     */
    void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
        if (!e.isShutdown()) {
            r.run();
        }
    }
}

/**
 * A handler for rejected tasks that throws a
 * {@link RejectedExecutionException}.
 *
 * This is the default handler for {@link ThreadPoolExecutor} and
 * {@link ScheduledThreadPoolExecutor}.
 */
class AbortPolicy : RejectedExecutionHandler {
    /**
     * Creates an {@code AbortPolicy}.
     */
    this() { }

    /**
     * Always throws RejectedExecutionException.
     *
     * @param r the runnable task requested to be executed
     * @param e the executor attempting to execute this task
     * @throws RejectedExecutionException always
     */
    void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
        throw new RejectedExecutionException("Task " ~ (cast(Object)r).toString() ~
                " rejected from " ~ e.toString());
    }
}

/**
 * A handler for rejected tasks that silently discards the
 * rejected task.
 */
class DiscardPolicy : RejectedExecutionHandler {
    /**
     * Creates a {@code DiscardPolicy}.
     */
    this() { }

    /**
     * Does nothing, which has the effect of discarding task r.
     *
     * @param r the runnable task requested to be executed
     * @param e the executor attempting to execute this task
     */
    void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
    }
}

/**
 * A handler for rejected tasks that discards the oldest unhandled
 * request and then retries {@code execute}, unless the executor
 * is shut down, in which case the task is discarded.
 */
class DiscardOldestPolicy : RejectedExecutionHandler {
    /**
     * Creates a {@code DiscardOldestPolicy}.
     */
    this() { }

    /**
     * Obtains and ignores the next task that the executor
     * would otherwise execute, if one is immediately available,
     * and then retries execution of task r, unless the executor
     * is shut down, in which case task r is instead discarded.
     *
     * @param r the runnable task requested to be executed
     * @param e the executor attempting to execute this task
     */
    void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
        if (!e.isShutdown()) {
            e.getQueue().poll();
            e.execute(r);
        }
    }
}
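
/*
 * Illustrative usage sketch (not part of the API): any of the predefined
 * handlers above can be installed on an existing pool; the {@code pool}
 * variable is assumed to be constructed elsewhere.
 *
 *     pool.setRejectedExecutionHandler(new CallerRunsPolicy());
 *     // or, to favour newer work over older queued tasks:
 *     // pool.setRejectedExecutionHandler(new DiscardOldestPolicy());
 */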