root/jsr166/jsr166/src/main/java/util/concurrent/ThreadPoolExecutor.java
Revision: 1.179
Committed: Tue Sep 12 17:57:16 2017 UTC (6 years, 8 months ago) by jsr166
Branch: MAIN
Changes since 1.178: +1 -1 lines
Log Message:
@code -> @link

File Contents

1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/publicdomain/zero/1.0/
5 */
6
7 package java.util.concurrent;
8
9 import java.security.AccessControlContext;
10 import java.security.AccessController;
11 import java.security.PrivilegedAction;
12 import java.util.ArrayList;
13 import java.util.ConcurrentModificationException;
14 import java.util.HashSet;
15 import java.util.Iterator;
16 import java.util.List;
17 import java.util.concurrent.atomic.AtomicInteger;
18 import java.util.concurrent.locks.AbstractQueuedSynchronizer;
19 import java.util.concurrent.locks.Condition;
20 import java.util.concurrent.locks.ReentrantLock;
21
22 /**
23 * An {@link ExecutorService} that executes each submitted task using
24 * one of possibly several pooled threads, normally configured
25 * using {@link Executors} factory methods.
26 *
27 * <p>Thread pools address two different problems: they usually
28 * provide improved performance when executing large numbers of
29 * asynchronous tasks, due to reduced per-task invocation overhead,
30 * and they provide a means of bounding and managing the resources,
31 * including threads, consumed when executing a collection of tasks.
32 * Each {@code ThreadPoolExecutor} also maintains some basic
33 * statistics, such as the number of completed tasks.
34 *
35 * <p>To be useful across a wide range of contexts, this class
36 * provides many adjustable parameters and extensibility
37 * hooks. However, programmers are urged to use the more convenient
38 * {@link Executors} factory methods {@link
39 * Executors#newCachedThreadPool} (unbounded thread pool, with
40 * automatic thread reclamation), {@link Executors#newFixedThreadPool}
41 * (fixed size thread pool) and {@link
42 * Executors#newSingleThreadExecutor} (single background thread), that
43 * preconfigure settings for the most common usage
44 * scenarios. Otherwise, use the following guide when manually
45 * configuring and tuning this class:
46 *
47 * <dl>
48 *
49 * <dt>Core and maximum pool sizes</dt>
50 *
51 * <dd>A {@code ThreadPoolExecutor} will automatically adjust the
52 * pool size (see {@link #getPoolSize})
53 * according to the bounds set by
54 * corePoolSize (see {@link #getCorePoolSize}) and
55 * maximumPoolSize (see {@link #getMaximumPoolSize}).
56 *
57 * When a new task is submitted in method {@link #execute(Runnable)},
58 * if fewer than corePoolSize threads are running, a new thread is
59 * created to handle the request, even if other worker threads are
60 * idle. Else if fewer than maximumPoolSize threads are running, a
61 * new thread will be created to handle the request only if the queue
62 * is full. By setting corePoolSize and maximumPoolSize the same, you
63 * create a fixed-size thread pool. By setting maximumPoolSize to an
64 * essentially unbounded value such as {@code Integer.MAX_VALUE}, you
65 * allow the pool to accommodate an arbitrary number of concurrent
66 * tasks. Most typically, core and maximum pool sizes are set only
67 * upon construction, but they may also be changed dynamically using
68 * {@link #setCorePoolSize} and {@link #setMaximumPoolSize}. </dd>
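 *
 * <dd>For illustration only (a sketch; the sizes, keep-alive time and
 * queue capacity below are arbitrary choices, not recommendations), a
 * pool that keeps two core threads and grows to at most four when its
 * bounded queue fills up could be configured as:
 *
 * <pre> {@code
 * ThreadPoolExecutor pool = new ThreadPoolExecutor(
 *     2,                        // corePoolSize
 *     4,                        // maximumPoolSize
 *     60L, TimeUnit.SECONDS,    // keepAliveTime for threads beyond the core
 *     new ArrayBlockingQueue<Runnable>(100));
 * pool.setMaximumPoolSize(8);   // bounds may also be adjusted dynamically
 * }</pre></dd>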
69 *
70 * <dt>On-demand construction</dt>
71 *
72 * <dd>By default, even core threads are initially created and
73 * started only when new tasks arrive, but this can be overridden
74 * dynamically using method {@link #prestartCoreThread} or {@link
75 * #prestartAllCoreThreads}. You probably want to prestart threads if
76 * you construct the pool with a non-empty queue. </dd>
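 *
 * <dd>For example (a sketch; {@code initialTasks} stands for some
 * existing collection of {@code Runnable}s, and the pool sizes are
 * arbitrary), a pool built over a pre-filled queue processes nothing
 * until threads are prestarted:
 *
 * <pre> {@code
 * BlockingQueue<Runnable> preloaded =
 *     new LinkedBlockingQueue<Runnable>(initialTasks);
 * ThreadPoolExecutor pool = new ThreadPoolExecutor(
 *     4, 4, 0L, TimeUnit.MILLISECONDS, preloaded);
 * pool.prestartAllCoreThreads(); // otherwise no thread runs until execute()
 * }</pre></dd>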
77 *
78 * <dt>Creating new threads</dt>
79 *
80 * <dd>New threads are created using a {@link ThreadFactory}. If not
81 * otherwise specified, {@link Executors#defaultThreadFactory} is
82 * used, which creates threads all in the same {@link
83 * ThreadGroup} and with the same {@code NORM_PRIORITY} priority and
84 * non-daemon status. By supplying a different ThreadFactory, you can
85 * alter the thread's name, thread group, priority, daemon status,
86 * etc. If a {@code ThreadFactory} fails to create a thread when asked
87 * by returning null from {@code newThread}, the executor will
88 * continue, but might not be able to execute any tasks. Threads
89 * should possess the "modifyThread" {@code RuntimePermission}. If
90 * worker threads or other threads using the pool do not possess this
91 * permission, service may be degraded: configuration changes may not
92 * take effect in a timely manner, and a shutdown pool may remain in a
93 * state in which termination is possible but not completed.</dd>
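 *
 * <dd>For example (a sketch; the name prefix and pool sizes are
 * arbitrary), a factory producing named daemon threads can wrap the
 * default factory:
 *
 * <pre> {@code
 * ThreadFactory daemonFactory = r -> {
 *     Thread t = Executors.defaultThreadFactory().newThread(r);
 *     t.setName("pool-worker-" + t.getName());
 *     t.setDaemon(true);
 *     return t;
 * };
 * ExecutorService pool = new ThreadPoolExecutor(
 *     2, 2, 0L, TimeUnit.MILLISECONDS,
 *     new LinkedBlockingQueue<Runnable>(), daemonFactory);
 * }</pre></dd>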
94 *
95 * <dt>Keep-alive times</dt>
96 *
97 * <dd>If the pool currently has more than corePoolSize threads,
98 * excess threads will be terminated if they have been idle for more
99 * than the keepAliveTime (see {@link #getKeepAliveTime(TimeUnit)}).
100 * This provides a means of reducing resource consumption when the
101 * pool is not being actively used. If the pool becomes more active
102 * later, new threads will be constructed. This parameter can also be
103 * changed dynamically using method {@link #setKeepAliveTime(long,
104 * TimeUnit)}. Using a value of {@code Long.MAX_VALUE} {@link
105 * TimeUnit#NANOSECONDS} effectively disables idle threads from ever
106 * terminating prior to shutdown. By default, the keep-alive policy
107 * applies only when there are more than corePoolSize threads, but
108 * method {@link #allowCoreThreadTimeOut(boolean)} can be used to
109 * apply this time-out policy to core threads as well, so long as the
110 * keepAliveTime value is non-zero. </dd>
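 *
 * <dd>For example (a sketch; the 30 second value is arbitrary), given
 * an existing {@code ThreadPoolExecutor pool}:
 *
 * <pre> {@code
 * pool.setKeepAliveTime(30L, TimeUnit.SECONDS); // idle excess threads exit after 30s
 * pool.allowCoreThreadTimeOut(true);            // core threads may now time out too
 * }</pre></dd>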
111 *
112 * <dt>Queuing</dt>
113 *
114 * <dd>Any {@link BlockingQueue} may be used to transfer and hold
115 * submitted tasks. The use of this queue interacts with pool sizing:
116 *
117 * <ul>
118 *
119 * <li>If fewer than corePoolSize threads are running, the Executor
120 * always prefers adding a new thread
121 * rather than queuing.
122 *
123 * <li>If corePoolSize or more threads are running, the Executor
124 * always prefers queuing a request rather than adding a new
125 * thread.
126 *
127 * <li>If a request cannot be queued, a new thread is created unless
128 * this would exceed maximumPoolSize, in which case, the task will be
129 * rejected.
130 *
131 * </ul>
132 *
133 * There are three general strategies for queuing:
134 * <ol>
135 *
136 * <li><em> Direct handoffs.</em> A good default choice for a work
137 * queue is a {@link SynchronousQueue} that hands off tasks to threads
138 * without otherwise holding them. Here, an attempt to queue a task
139 * will fail if no threads are immediately available to run it, so a
140 * new thread will be constructed. This policy avoids lockups when
141 * handling sets of requests that might have internal dependencies.
142 * Direct handoffs generally require unbounded maximumPoolSizes to
143 * avoid rejection of newly submitted tasks. This in turn admits the
144 * possibility of unbounded thread growth when commands continue to
145 * arrive on average faster than they can be processed.
146 *
147 * <li><em> Unbounded queues.</em> Using an unbounded queue (for
148 * example a {@link LinkedBlockingQueue} without a predefined
149 * capacity) will cause new tasks to wait in the queue when all
150 * corePoolSize threads are busy. Thus, no more than corePoolSize
151 * threads will ever be created. (And the value of the maximumPoolSize
152 * therefore doesn't have any effect.) This may be appropriate when
153 * each task is completely independent of others, so tasks cannot
154 * affect each other's execution; for example, in a web page server.
155 * While this style of queuing can be useful in smoothing out
156 * transient bursts of requests, it admits the possibility of
157 * unbounded work queue growth when commands continue to arrive on
158 * average faster than they can be processed.
159 *
160 * <li><em>Bounded queues.</em> A bounded queue (for example, an
161 * {@link ArrayBlockingQueue}) helps prevent resource exhaustion when
162 * used with finite maximumPoolSizes, but can be more difficult to
163 * tune and control. Queue sizes and maximum pool sizes may be traded
164 * off for each other: Using large queues and small pools minimizes
165 * CPU usage, OS resources, and context-switching overhead, but can
166 * lead to artificially low throughput. If tasks frequently block (for
167 * example if they are I/O bound), a system may be able to schedule
168 * time for more threads than you otherwise allow. Use of small queues
169 * generally requires larger pool sizes, which keeps CPUs busier but
170 * may encounter unacceptable scheduling overhead, which also
171 * decreases throughput.
172 *
173 * </ol>
174 *
175 * </dd>
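 *
 * <dd>The three strategies correspond to three work queue choices
 * (a sketch; the capacity of 1000 is arbitrary):
 *
 * <pre> {@code
 * // 1. Direct handoff: grows the pool (up to maximumPoolSize) instead of queuing
 * BlockingQueue<Runnable> handoff = new SynchronousQueue<Runnable>();
 *
 * // 2. Unbounded queue: never creates more than corePoolSize threads
 * BlockingQueue<Runnable> unbounded = new LinkedBlockingQueue<Runnable>();
 *
 * // 3. Bounded queue: trades queue capacity against maximum pool size
 * BlockingQueue<Runnable> bounded = new ArrayBlockingQueue<Runnable>(1000);
 * }</pre></dd>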
176 *
177 * <dt>Rejected tasks</dt>
178 *
179 * <dd>New tasks submitted in method {@link #execute(Runnable)} will be
180 * <em>rejected</em> when the Executor has been shut down, and also when
181 * the Executor uses finite bounds for both maximum threads and work queue
182 * capacity, and is saturated. In either case, the {@code execute} method
183 * invokes the {@link
184 * RejectedExecutionHandler#rejectedExecution(Runnable, ThreadPoolExecutor)}
185 * method of its {@link RejectedExecutionHandler}. Four predefined handler
186 * policies are provided:
187 *
188 * <ol>
189 *
190 * <li>In the default {@link ThreadPoolExecutor.AbortPolicy}, the handler
191 * throws a runtime {@link RejectedExecutionException} upon rejection.
192 *
193 * <li>In {@link ThreadPoolExecutor.CallerRunsPolicy}, the thread
194 * that invokes {@code execute} itself runs the task. This provides a
195 * simple feedback control mechanism that will slow down the rate that
196 * new tasks are submitted.
197 *
198 * <li>In {@link ThreadPoolExecutor.DiscardPolicy}, a task that
199 * cannot be executed is simply dropped.
200 *
201 * <li>In {@link ThreadPoolExecutor.DiscardOldestPolicy}, if the
202 * executor is not shut down, the task at the head of the work queue
203 * is dropped, and then execution is retried (which can fail again,
204 * causing this to be repeated).
205 *
206 * </ol>
207 *
208 * It is possible to define and use other kinds of {@link
209 * RejectedExecutionHandler} classes. Doing so requires some care
210 * especially when policies are designed to work only under particular
211 * capacity or queuing policies. </dd>
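 *
 * <dd>For example (a sketch; {@code pool} is an existing
 * {@code ThreadPoolExecutor}, and the custom handler shown, which
 * merely logs and drops, is not one of the predefined policies):
 *
 * <pre> {@code
 * pool.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
 *
 * // or a custom handler:
 * pool.setRejectedExecutionHandler((r, executor) ->
 *     System.err.println("Rejected " + r + " from " + executor));
 * }</pre></dd>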
212 *
213 * <dt>Hook methods</dt>
214 *
215 * <dd>This class provides {@code protected} overridable
216 * {@link #beforeExecute(Thread, Runnable)} and
217 * {@link #afterExecute(Runnable, Throwable)} methods that are called
218 * before and after execution of each task. These can be used to
219 * manipulate the execution environment; for example, reinitializing
220 * ThreadLocals, gathering statistics, or adding log entries.
221 * Additionally, method {@link #terminated} can be overridden to perform
222 * any special processing that needs to be done once the Executor has
223 * fully terminated.
224 *
225 * <p>If hook, callback, or BlockingQueue methods throw exceptions,
226 * internal worker threads may in turn fail, abruptly terminate, and
227 * possibly be replaced.</dd>
228 *
229 * <dt>Queue maintenance</dt>
230 *
231 * <dd>Method {@link #getQueue()} allows access to the work queue
232 * for purposes of monitoring and debugging. Use of this method for
233 * any other purpose is strongly discouraged. Two supplied methods,
234 * {@link #remove(Runnable)} and {@link #purge} are available to
235 * assist in storage reclamation when large numbers of queued tasks
236 * become cancelled.</dd>
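 *
 * <dd>For example (a sketch; {@code pool} and {@code task} are an
 * existing executor and {@code Runnable}):
 *
 * <pre> {@code
 * Future<?> f = pool.submit(task);
 * if (f.cancel(false))  // cancelled before it began running
 *     pool.purge();     // drop cancelled Futures still sitting in the queue
 * }</pre></dd>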
237 *
238 * <dt>Finalization</dt>
239 *
240 * <dd>A pool that is no longer referenced in a program <em>AND</em>
241 * has no remaining threads will be {@code shutdown} automatically. If
242 * you would like to ensure that unreferenced pools are reclaimed even
243 * if users forget to call {@link #shutdown}, then you must arrange
244 * that unused threads eventually die, by setting appropriate
245 * keep-alive times, using a lower bound of zero core threads and/or
246 * setting {@link #allowCoreThreadTimeOut(boolean)}. </dd>
247 *
248 * </dl>
249 *
250 * <p><b>Extension example</b>. Most extensions of this class
251 * override one or more of the protected hook methods. For example,
252 * here is a subclass that adds a simple pause/resume feature:
253 *
254 * <pre> {@code
255 * class PausableThreadPoolExecutor extends ThreadPoolExecutor {
256 * private boolean isPaused;
257 * private ReentrantLock pauseLock = new ReentrantLock();
258 * private Condition unpaused = pauseLock.newCondition();
259 *
260 * public PausableThreadPoolExecutor(...) { super(...); }
261 *
262 * protected void beforeExecute(Thread t, Runnable r) {
263 * super.beforeExecute(t, r);
264 * pauseLock.lock();
265 * try {
266 * while (isPaused) unpaused.await();
267 * } catch (InterruptedException ie) {
268 * t.interrupt();
269 * } finally {
270 * pauseLock.unlock();
271 * }
272 * }
273 *
274 * public void pause() {
275 * pauseLock.lock();
276 * try {
277 * isPaused = true;
278 * } finally {
279 * pauseLock.unlock();
280 * }
281 * }
282 *
283 * public void resume() {
284 * pauseLock.lock();
285 * try {
286 * isPaused = false;
287 * unpaused.signalAll();
288 * } finally {
289 * pauseLock.unlock();
290 * }
291 * }
292 * }}</pre>
293 *
294 * @since 1.5
295 * @author Doug Lea
296 */
297 public class ThreadPoolExecutor extends AbstractExecutorService {
298 /**
299 * The main pool control state, ctl, is an atomic integer packing
300 * two conceptual fields
301 * workerCount, indicating the effective number of threads
302 * runState, indicating whether running, shutting down etc
303 *
304 * In order to pack them into one int, we limit workerCount to
305 * (2^29)-1 (about 500 million) threads rather than (2^31)-1 (2
306 * billion) otherwise representable. If this is ever an issue in
307 * the future, the variable can be changed to be an AtomicLong,
308 * and the shift/mask constants below adjusted. But until the need
309 * arises, this code is a bit faster and simpler using an int.
310 *
311 * The workerCount is the number of workers that have been
312 * permitted to start and not permitted to stop. The value may be
313 * transiently different from the actual number of live threads,
314 * for example when a ThreadFactory fails to create a thread when
315 * asked, and when exiting threads are still performing
316 * bookkeeping before terminating. The user-visible pool size is
317 * reported as the current size of the workers set.
318 *
319 * The runState provides the main lifecycle control, taking on values:
320 *
321 * RUNNING: Accept new tasks and process queued tasks
322 * SHUTDOWN: Don't accept new tasks, but process queued tasks
323 * STOP: Don't accept new tasks, don't process queued tasks,
324 * and interrupt in-progress tasks
325 * TIDYING: All tasks have terminated, workerCount is zero,
326 * the thread transitioning to state TIDYING
327 * will run the terminated() hook method
328 * TERMINATED: terminated() has completed
329 *
330 * The numerical order among these values matters, to allow
331 * ordered comparisons. The runState monotonically increases over
332 * time, but need not hit each state. The transitions are:
333 *
334 * RUNNING -> SHUTDOWN
335 * On invocation of shutdown(), perhaps implicitly in finalize()
336 * (RUNNING or SHUTDOWN) -> STOP
337 * On invocation of shutdownNow()
338 * SHUTDOWN -> TIDYING
339 * When both queue and pool are empty
340 * STOP -> TIDYING
341 * When pool is empty
342 * TIDYING -> TERMINATED
343 * When the terminated() hook method has completed
344 *
345 * Threads waiting in awaitTermination() will return when the
346 * state reaches TERMINATED.
347 *
348 * Detecting the transition from SHUTDOWN to TIDYING is less
349 * straightforward than you'd like because the queue may become
350 * empty after non-empty and vice versa during SHUTDOWN state, but
351 * we can only terminate if, after seeing that it is empty, we see
352 * that workerCount is 0 (which sometimes entails a recheck -- see
353 * below).
354 */
355 private final AtomicInteger ctl = new AtomicInteger(ctlOf(RUNNING, 0));
356 private static final int COUNT_BITS = Integer.SIZE - 3;
357 private static final int CAPACITY = (1 << COUNT_BITS) - 1;
358
359 // runState is stored in the high-order bits
360 private static final int RUNNING = -1 << COUNT_BITS;
361 private static final int SHUTDOWN = 0 << COUNT_BITS;
362 private static final int STOP = 1 << COUNT_BITS;
363 private static final int TIDYING = 2 << COUNT_BITS;
364 private static final int TERMINATED = 3 << COUNT_BITS;
365
366 // Packing and unpacking ctl
367 private static int runStateOf(int c) { return c & ~CAPACITY; }
368 private static int workerCountOf(int c) { return c & CAPACITY; }
369 private static int ctlOf(int rs, int wc) { return rs | wc; }
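// Worked illustration (added commentary; not used by the implementation):
// with COUNT_BITS = 29, CAPACITY = 0x1fffffff, and the run states are
//   RUNNING  = 0xe0000000   SHUTDOWN   = 0x00000000   STOP = 0x20000000
//   TIDYING  = 0x40000000   TERMINATED = 0x60000000
// so ctlOf(RUNNING, 3) == 0xe0000003, workerCountOf(0xe0000003) == 3,
// and runStateOf(0xe0000003) == RUNNING.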
370
371 /*
372 * Bit field accessors that don't require unpacking ctl.
373 * These depend on the bit layout and on workerCount being never negative.
374 */
375
376 private static boolean runStateLessThan(int c, int s) {
377 return c < s;
378 }
379
380 private static boolean runStateAtLeast(int c, int s) {
381 return c >= s;
382 }
383
384 private static boolean isRunning(int c) {
385 return c < SHUTDOWN;
386 }
387
388 /**
389 * Attempts to CAS-increment the workerCount field of ctl.
390 */
391 private boolean compareAndIncrementWorkerCount(int expect) {
392 return ctl.compareAndSet(expect, expect + 1);
393 }
394
395 /**
396 * Attempts to CAS-decrement the workerCount field of ctl.
397 */
398 private boolean compareAndDecrementWorkerCount(int expect) {
399 return ctl.compareAndSet(expect, expect - 1);
400 }
401
402 /**
403 * Decrements the workerCount field of ctl. This is called only on
404 * abrupt termination of a thread (see processWorkerExit). Other
405 * decrements are performed within getTask.
406 */
407 private void decrementWorkerCount() {
408 ctl.addAndGet(-1);
409 }
410
411 /**
412 * The queue used for holding tasks and handing off to worker
413 * threads. We do not require that workQueue.poll() returning
414 * null necessarily means that workQueue.isEmpty(), so rely
415 * solely on isEmpty to see if the queue is empty (which we must
416 * do for example when deciding whether to transition from
417 * SHUTDOWN to TIDYING). This accommodates special-purpose
418 * queues such as DelayQueues for which poll() is allowed to
419 * return null even if it may later return non-null when delays
420 * expire.
421 */
422 private final BlockingQueue<Runnable> workQueue;
423
424 /**
425 * Lock held on access to workers set and related bookkeeping.
426 * While we could use a concurrent set of some sort, it turns out
427 * to be generally preferable to use a lock. Among the reasons is
428 * that this serializes interruptIdleWorkers, which avoids
429 * unnecessary interrupt storms, especially during shutdown.
430 * Otherwise exiting threads would concurrently interrupt those
431 * that have not yet interrupted. It also simplifies some of the
432 * associated statistics bookkeeping of largestPoolSize etc. We
433 * also hold mainLock on shutdown and shutdownNow, for the sake of
434 * ensuring workers set is stable while separately checking
435 * permission to interrupt and actually interrupting.
436 */
437 private final ReentrantLock mainLock = new ReentrantLock();
438
439 /**
440 * Set containing all worker threads in pool. Accessed only when
441 * holding mainLock.
442 */
443 private final HashSet<Worker> workers = new HashSet<>();
444
445 /**
446 * Wait condition to support awaitTermination.
447 */
448 private final Condition termination = mainLock.newCondition();
449
450 /**
451 * Tracks largest attained pool size. Accessed only under
452 * mainLock.
453 */
454 private int largestPoolSize;
455
456 /**
457 * Counter for completed tasks. Updated only on termination of
458 * worker threads. Accessed only under mainLock.
459 */
460 private long completedTaskCount;
461
462 /*
463 * All user control parameters are declared as volatiles so that
464 * ongoing actions are based on freshest values, but without need
465 * for locking, since no internal invariants depend on them
466 * changing synchronously with respect to other actions.
467 */
468
469 /**
470 * Factory for new threads. All threads are created using this
471 * factory (via method addWorker). All callers must be prepared
472 * for addWorker to fail, which may reflect a system or user's
473 * policy limiting the number of threads. Even though it is not
474 * treated as an error, failure to create threads may result in
475 * new tasks being rejected or existing ones remaining stuck in
476 * the queue.
477 *
478 * We go further and preserve pool invariants even in the face of
479 * errors such as OutOfMemoryError, that might be thrown while
480 * trying to create threads. Such errors are rather common due to
481 * the need to allocate a native stack in Thread.start, and users
482 * will want to perform clean pool shutdown to clean up. There
483 * will likely be enough memory available for the cleanup code to
484 * complete without encountering yet another OutOfMemoryError.
485 */
486 private volatile ThreadFactory threadFactory;
487
488 /**
489 * Handler called when saturated or shutdown in execute.
490 */
491 private volatile RejectedExecutionHandler handler;
492
493 /**
494 * Timeout in nanoseconds for idle threads waiting for work.
495 * Threads use this timeout when there are more than corePoolSize
496 * present or if allowCoreThreadTimeOut. Otherwise they wait
497 * forever for new work.
498 */
499 private volatile long keepAliveTime;
500
501 /**
502 * If false (default), core threads stay alive even when idle.
503 * If true, core threads use keepAliveTime to time out waiting
504 * for work.
505 */
506 private volatile boolean allowCoreThreadTimeOut;
507
508 /**
509 * Core pool size is the minimum number of workers to keep alive
510 * (and not allow to time out etc) unless allowCoreThreadTimeOut
511 * is set, in which case the minimum is zero.
512 */
513 private volatile int corePoolSize;
514
515 /**
516 * Maximum pool size. Note that the actual maximum is internally
517 * bounded by CAPACITY.
518 */
519 private volatile int maximumPoolSize;
520
521 /**
522 * The default rejected execution handler.
523 */
524 private static final RejectedExecutionHandler defaultHandler =
525 new AbortPolicy();
526
527 /**
528 * Permission required for callers of shutdown and shutdownNow.
529 * We additionally require (see checkShutdownAccess) that callers
530 * have permission to actually interrupt threads in the worker set
531 * (as governed by Thread.interrupt, which relies on
532 * ThreadGroup.checkAccess, which in turn relies on
533 * SecurityManager.checkAccess). Shutdowns are attempted only if
534 * these checks pass.
535 *
536 * All actual invocations of Thread.interrupt (see
537 * interruptIdleWorkers and interruptWorkers) ignore
538 * SecurityExceptions, meaning that the attempted interrupts
539 * silently fail. In the case of shutdown, they should not fail
540 * unless the SecurityManager has inconsistent policies, sometimes
541 * allowing access to a thread and sometimes not. In such cases,
542 * failure to actually interrupt threads may disable or delay full
543 * termination. Other uses of interruptIdleWorkers are advisory,
544 * and failure to actually interrupt will merely delay response to
545 * configuration changes so is not handled exceptionally.
546 */
547 private static final RuntimePermission shutdownPerm =
548 new RuntimePermission("modifyThread");
549
550 /** The context to be used when executing the finalizer, or null. */
551 private final AccessControlContext acc;
552
553 /**
554 * Class Worker mainly maintains interrupt control state for
555 * threads running tasks, along with other minor bookkeeping.
556 * This class opportunistically extends AbstractQueuedSynchronizer
557 * to simplify acquiring and releasing a lock surrounding each
558 * task execution. This protects against interrupts that are
559 * intended to wake up a worker thread waiting for a task from
560 * instead interrupting a task being run. We implement a simple
561 * non-reentrant mutual exclusion lock rather than use
562 * ReentrantLock because we do not want worker tasks to be able to
563 * reacquire the lock when they invoke pool control methods like
564 * setCorePoolSize. Additionally, to suppress interrupts until
565 * the thread actually starts running tasks, we initialize lock
566 * state to a negative value, and clear it upon start (in
567 * runWorker).
568 */
569 private final class Worker
570 extends AbstractQueuedSynchronizer
571 implements Runnable
572 {
573 /**
574 * This class will never be serialized, but we provide a
575 * serialVersionUID to suppress a javac warning.
576 */
577 private static final long serialVersionUID = 6138294804551838833L;
578
579 /** Thread this worker is running in. Null if factory fails. */
580 final Thread thread;
581 /** Initial task to run. Possibly null. */
582 Runnable firstTask;
583 /** Per-thread task counter */
584 volatile long completedTasks;
585
586 // TODO: switch to AbstractQueuedLongSynchronizer and move
587 // completedTasks into the lock word.
588
589 /**
590 * Creates with given first task and thread from ThreadFactory.
591 * @param firstTask the first task (null if none)
592 */
593 Worker(Runnable firstTask) {
594 setState(-1); // inhibit interrupts until runWorker
595 this.firstTask = firstTask;
596 this.thread = getThreadFactory().newThread(this);
597 }
598
599 /** Delegates main run loop to outer runWorker. */
600 public void run() {
601 runWorker(this);
602 }
603
604 // Lock methods
605 //
606 // The value 0 represents the unlocked state.
607 // The value 1 represents the locked state.
608
609 protected boolean isHeldExclusively() {
610 return getState() != 0;
611 }
612
613 protected boolean tryAcquire(int unused) {
614 if (compareAndSetState(0, 1)) {
615 setExclusiveOwnerThread(Thread.currentThread());
616 return true;
617 }
618 return false;
619 }
620
621 protected boolean tryRelease(int unused) {
622 setExclusiveOwnerThread(null);
623 setState(0);
624 return true;
625 }
626
627 public void lock() { acquire(1); }
628 public boolean tryLock() { return tryAcquire(1); }
629 public void unlock() { release(1); }
630 public boolean isLocked() { return isHeldExclusively(); }
631
632 void interruptIfStarted() {
633 Thread t;
634 if (getState() >= 0 && (t = thread) != null && !t.isInterrupted()) {
635 try {
636 t.interrupt();
637 } catch (SecurityException ignore) {
638 }
639 }
640 }
641 }
642
643 /*
644 * Methods for setting control state
645 */
646
647 /**
648 * Transitions runState to given target, or leaves it alone if
649 * already at least the given target.
650 *
651 * @param targetState the desired state, either SHUTDOWN or STOP
652 * (but not TIDYING or TERMINATED -- use tryTerminate for that)
653 */
654 private void advanceRunState(int targetState) {
655 // assert targetState == SHUTDOWN || targetState == STOP;
656 for (;;) {
657 int c = ctl.get();
658 if (runStateAtLeast(c, targetState) ||
659 ctl.compareAndSet(c, ctlOf(targetState, workerCountOf(c))))
660 break;
661 }
662 }
663
664 /**
665 * Transitions to TERMINATED state if either (SHUTDOWN and pool
666 * and queue empty) or (STOP and pool empty). If otherwise
667 * eligible to terminate but workerCount is nonzero, interrupts an
668 * idle worker to ensure that shutdown signals propagate. This
669 * method must be called following any action that might make
670 * termination possible -- reducing worker count or removing tasks
671 * from the queue during shutdown. The method is non-private to
672 * allow access from ScheduledThreadPoolExecutor.
673 */
674 final void tryTerminate() {
675 for (;;) {
676 int c = ctl.get();
677 if (isRunning(c) ||
678 runStateAtLeast(c, TIDYING) ||
679 (runStateLessThan(c, STOP) && ! workQueue.isEmpty()))
680 return;
681 if (workerCountOf(c) != 0) { // Eligible to terminate
682 interruptIdleWorkers(ONLY_ONE);
683 return;
684 }
685
686 final ReentrantLock mainLock = this.mainLock;
687 mainLock.lock();
688 try {
689 if (ctl.compareAndSet(c, ctlOf(TIDYING, 0))) {
690 try {
691 terminated();
692 } finally {
693 ctl.set(ctlOf(TERMINATED, 0));
694 termination.signalAll();
695 }
696 return;
697 }
698 } finally {
699 mainLock.unlock();
700 }
701 // else retry on failed CAS
702 }
703 }
704
705 /*
706 * Methods for controlling interrupts to worker threads.
707 */
708
709 /**
710 * If there is a security manager, makes sure caller has
711 * permission to shut down threads in general (see shutdownPerm).
712 * If this passes, additionally makes sure the caller is allowed
713 * to interrupt each worker thread. This might not be true even if
714 * first check passed, if the SecurityManager treats some threads
715 * specially.
716 */
717 private void checkShutdownAccess() {
718 SecurityManager security = System.getSecurityManager();
719 if (security != null) {
720 security.checkPermission(shutdownPerm);
721 final ReentrantLock mainLock = this.mainLock;
722 mainLock.lock();
723 try {
724 for (Worker w : workers)
725 security.checkAccess(w.thread);
726 } finally {
727 mainLock.unlock();
728 }
729 }
730 }
731
732 /**
733 * Interrupts all threads, even if active. Ignores SecurityExceptions
734 * (in which case some threads may remain uninterrupted).
735 */
736 private void interruptWorkers() {
737 final ReentrantLock mainLock = this.mainLock;
738 mainLock.lock();
739 try {
740 for (Worker w : workers)
741 w.interruptIfStarted();
742 } finally {
743 mainLock.unlock();
744 }
745 }
746
747 /**
748 * Interrupts threads that might be waiting for tasks (as
749 * indicated by not being locked) so they can check for
750 * termination or configuration changes. Ignores
751 * SecurityExceptions (in which case some threads may remain
752 * uninterrupted).
753 *
754 * @param onlyOne If true, interrupt at most one worker. This is
755 * called only from tryTerminate when termination is otherwise
756 * enabled but there are still other workers. In this case, at
757 * most one waiting worker is interrupted to propagate shutdown
758 * signals in case all threads are currently waiting.
759 * Interrupting any arbitrary thread ensures that newly arriving
760 * workers since shutdown began will also eventually exit.
761 * To guarantee eventual termination, it suffices to always
762 * interrupt only one idle worker, but shutdown() interrupts all
763 * idle workers so that redundant workers exit promptly, not
764 * waiting for a straggler task to finish.
765 */
766 private void interruptIdleWorkers(boolean onlyOne) {
767 final ReentrantLock mainLock = this.mainLock;
768 mainLock.lock();
769 try {
770 for (Worker w : workers) {
771 Thread t = w.thread;
772 if (!t.isInterrupted() && w.tryLock()) {
773 try {
774 t.interrupt();
775 } catch (SecurityException ignore) {
776 } finally {
777 w.unlock();
778 }
779 }
780 if (onlyOne)
781 break;
782 }
783 } finally {
784 mainLock.unlock();
785 }
786 }
787
788 /**
789 * Common form of interruptIdleWorkers, to avoid having to
790 * remember what the boolean argument means.
791 */
792 private void interruptIdleWorkers() {
793 interruptIdleWorkers(false);
794 }
795
796 private static final boolean ONLY_ONE = true;
797
798 /*
799 * Misc utilities, most of which are also exported to
800 * ScheduledThreadPoolExecutor
801 */
802
803 /**
804 * Invokes the rejected execution handler for the given command.
805 * Package-protected for use by ScheduledThreadPoolExecutor.
806 */
807 final void reject(Runnable command) {
808 handler.rejectedExecution(command, this);
809 }
810
811 /**
812 * Performs any further cleanup following run state transition on
813 * invocation of shutdown. A no-op here, but used by
814 * ScheduledThreadPoolExecutor to cancel delayed tasks.
815 */
816 void onShutdown() {
817 }
818
819 /**
820 * Drains the task queue into a new list, normally using
821 * drainTo. But if the queue is a DelayQueue or any other kind of
822 * queue for which poll or drainTo may fail to remove some
823 * elements, it deletes them one by one.
824 */
825 private List<Runnable> drainQueue() {
826 BlockingQueue<Runnable> q = workQueue;
827 ArrayList<Runnable> taskList = new ArrayList<>();
828 q.drainTo(taskList);
829 if (!q.isEmpty()) {
830 for (Runnable r : q.toArray(new Runnable[0])) {
831 if (q.remove(r))
832 taskList.add(r);
833 }
834 }
835 return taskList;
836 }
837
838 /*
839 * Methods for creating, running and cleaning up after workers
840 */
841
842 /**
843 * Checks if a new worker can be added with respect to current
844 * pool state and the given bound (either core or maximum). If so,
845 * the worker count is adjusted accordingly, and, if possible, a
846 * new worker is created and started, running firstTask as its
847 * first task. This method returns false if the pool is stopped or
848 * eligible to shut down. It also returns false if the thread
849 * factory fails to create a thread when asked. If the thread
850 * creation fails, either due to the thread factory returning
851 * null, or due to an exception (typically OutOfMemoryError in
852 * Thread.start()), we roll back cleanly.
853 *
854 * @param firstTask the task the new thread should run first (or
855 * null if none). Workers are created with an initial first task
856 * (in method execute()) to bypass queuing when there are fewer
857 * than corePoolSize threads (in which case we always start one),
858 * or when the queue is full (in which case we must bypass the queue).
859 * Initially idle threads are usually created via
860 * prestartCoreThread or to replace other dying workers.
861 *
862 * @param core if true use corePoolSize as bound, else
863 * maximumPoolSize. (A boolean indicator is used here rather than a
864 * value to ensure reads of fresh values after checking other pool
865 * state).
866 * @return true if successful
867 */
868 private boolean addWorker(Runnable firstTask, boolean core) {
869 retry:
870 for (int c = ctl.get();;) {
871 // Check if queue empty only if necessary.
872 if (runStateAtLeast(c, SHUTDOWN)
873 && (runStateAtLeast(c, STOP)
874 || firstTask != null
875 || workQueue.isEmpty()))
876 return false;
877
878 for (;;) {
879 int wc = workerCountOf(c);
880 if (wc >= CAPACITY ||
881 wc >= (core ? corePoolSize : maximumPoolSize))
882 return false;
883 if (compareAndIncrementWorkerCount(c))
884 break retry;
885 c = ctl.get(); // Re-read ctl
886 if (runStateAtLeast(c, SHUTDOWN))
887 continue retry;
888 // else CAS failed due to workerCount change; retry inner loop
889 }
890 }
891
892 boolean workerStarted = false;
893 boolean workerAdded = false;
894 Worker w = null;
895 try {
896 w = new Worker(firstTask);
897 final Thread t = w.thread;
898 if (t != null) {
899 final ReentrantLock mainLock = this.mainLock;
900 mainLock.lock();
901 try {
902 // Recheck while holding lock.
903 // Back out on ThreadFactory failure or if
904 // shut down before lock acquired.
905 int c = ctl.get();
906
907 if (isRunning(c) ||
908 (runStateLessThan(c, STOP) && firstTask == null)) {
909 if (t.isAlive()) // precheck that t is startable
910 throw new IllegalThreadStateException();
911 workers.add(w);
912 int s = workers.size();
913 if (s > largestPoolSize)
914 largestPoolSize = s;
915 workerAdded = true;
916 }
917 } finally {
918 mainLock.unlock();
919 }
920 if (workerAdded) {
921 t.start();
922 workerStarted = true;
923 }
924 }
925 } finally {
926 if (! workerStarted)
927 addWorkerFailed(w);
928 }
929 return workerStarted;
930 }
931
932 /**
933 * Rolls back the worker thread creation.
934 * - removes worker from workers, if present
935 * - decrements worker count
936 * - rechecks for termination, in case the existence of this
937 * worker was holding up termination
938 */
939 private void addWorkerFailed(Worker w) {
940 final ReentrantLock mainLock = this.mainLock;
941 mainLock.lock();
942 try {
943 if (w != null)
944 workers.remove(w);
945 decrementWorkerCount();
946 tryTerminate();
947 } finally {
948 mainLock.unlock();
949 }
950 }
951
952 /**
953 * Performs cleanup and bookkeeping for a dying worker. Called
954 * only from worker threads. Unless completedAbruptly is set,
955 * assumes that workerCount has already been adjusted to account
956 * for exit. This method removes the thread from the worker set, and
957 * possibly terminates the pool or replaces the worker if either
958 * it exited due to user task exception or if fewer than
959 * corePoolSize workers are running or queue is non-empty but
960 * there are no workers.
961 *
962 * @param w the worker
963 * @param completedAbruptly if the worker died due to user exception
964 */
965 private void processWorkerExit(Worker w, boolean completedAbruptly) {
966 if (completedAbruptly) // If abrupt, then workerCount wasn't adjusted
967 decrementWorkerCount();
968
969 final ReentrantLock mainLock = this.mainLock;
970 mainLock.lock();
971 try {
972 completedTaskCount += w.completedTasks;
973 workers.remove(w);
974 } finally {
975 mainLock.unlock();
976 }
977
978 tryTerminate();
979
980 int c = ctl.get();
981 if (runStateLessThan(c, STOP)) {
982 if (!completedAbruptly) {
983 int min = allowCoreThreadTimeOut ? 0 : corePoolSize;
984 if (min == 0 && ! workQueue.isEmpty())
985 min = 1;
986 if (workerCountOf(c) >= min)
987 return; // replacement not needed
988 }
989 addWorker(null, false);
990 }
991 }
992
993 /**
994 * Performs blocking or timed wait for a task, depending on
995 * current configuration settings, or returns null if this worker
996 * must exit because of any of:
997 * 1. There are more than maximumPoolSize workers (due to
998 * a call to setMaximumPoolSize).
999 * 2. The pool is stopped.
1000 * 3. The pool is shutdown and the queue is empty.
1001 * 4. This worker timed out waiting for a task, and timed-out
1002 * workers are subject to termination (that is,
1003 * {@code allowCoreThreadTimeOut || workerCount > corePoolSize})
1004 * both before and after the timed wait, and if the queue is
1005 * non-empty, this worker is not the last thread in the pool.
1006 *
1007 * @return task, or null if the worker must exit, in which case
1008 * workerCount is decremented
1009 */
1010 private Runnable getTask() {
1011 boolean timedOut = false; // Did the last poll() time out?
1012
1013 for (;;) {
1014 int c = ctl.get();
1015
1016 // Check if queue empty only if necessary.
1017 if (runStateAtLeast(c, SHUTDOWN)
1018 && (runStateAtLeast(c, STOP) || workQueue.isEmpty())) {
1019 decrementWorkerCount();
1020 return null;
1021 }
1022
1023 int wc = workerCountOf(c);
1024
1025 // Are workers subject to culling?
1026 boolean timed = allowCoreThreadTimeOut || wc > corePoolSize;
1027
1028 if ((wc > maximumPoolSize || (timed && timedOut))
1029 && (wc > 1 || workQueue.isEmpty())) {
1030 if (compareAndDecrementWorkerCount(c))
1031 return null;
1032 continue;
1033 }
1034
1035 try {
1036 Runnable r = timed ?
1037 workQueue.poll(keepAliveTime, TimeUnit.NANOSECONDS) :
1038 workQueue.take();
1039 if (r != null)
1040 return r;
1041 timedOut = true;
1042 } catch (InterruptedException retry) {
1043 timedOut = false;
1044 }
1045 }
1046 }
1047
1048 /**
1049 * Main worker run loop. Repeatedly gets tasks from queue and
1050 * executes them, while coping with a number of issues:
1051 *
1052 * 1. We may start out with an initial task, in which case we
1053 * don't need to get the first one. Otherwise, as long as pool is
1054 * running, we get tasks from getTask. If it returns null then the
1055 * worker exits due to changed pool state or configuration
1056 * parameters. Other exits result from exception throws in
1057 * external code, in which case completedAbruptly holds, which
1058 * usually leads processWorkerExit to replace this thread.
1059 *
1060 * 2. Before running any task, the lock is acquired to prevent
1061 * other pool interrupts while the task is executing, and then we
1062 * ensure that unless pool is stopping, this thread does not have
1063 * its interrupt set.
1064 *
1065 * 3. Each task run is preceded by a call to beforeExecute, which
1066 * might throw an exception, in which case we cause thread to die
1067 * (breaking loop with completedAbruptly true) without processing
1068 * the task.
1069 *
1070 * 4. Assuming beforeExecute completes normally, we run the task,
1071 * gathering any of its thrown exceptions to send to afterExecute.
1072 * We separately handle RuntimeException, Error (both of which the
1073 * specs guarantee that we trap) and arbitrary Throwables.
1074 * Because we cannot rethrow Throwables within Runnable.run, we
1075 * wrap them within Errors on the way out (to the thread's
1076 * UncaughtExceptionHandler). Any thrown exception also
1077 * conservatively causes thread to die.
1078 *
1079 * 5. After task.run completes, we call afterExecute, which may
1080 * also throw an exception, which will also cause thread to
1081 * die. According to JLS Sec 14.20, this exception is the one that
1082 * will be in effect even if task.run throws.
1083 *
1084 * The net effect of the exception mechanics is that afterExecute
1085 * and the thread's UncaughtExceptionHandler have as accurate
1086 * information as we can provide about any problems encountered by
1087 * user code.
1088 *
1089 * @param w the worker
1090 */
1091 final void runWorker(Worker w) {
1092 Thread wt = Thread.currentThread();
1093 Runnable task = w.firstTask;
1094 w.firstTask = null;
1095 w.unlock(); // allow interrupts
1096 boolean completedAbruptly = true;
1097 try {
1098 while (task != null || (task = getTask()) != null) {
1099 w.lock();
1100 // If pool is stopping, ensure thread is interrupted;
1101 // if not, ensure thread is not interrupted. This
1102 // requires a recheck in second case to deal with
1103 // shutdownNow race while clearing interrupt
1104 if ((runStateAtLeast(ctl.get(), STOP) ||
1105 (Thread.interrupted() &&
1106 runStateAtLeast(ctl.get(), STOP))) &&
1107 !wt.isInterrupted())
1108 wt.interrupt();
1109 try {
1110 beforeExecute(wt, task);
1111 Throwable thrown = null;
1112 try {
1113 task.run();
1114 } catch (RuntimeException x) {
1115 thrown = x; throw x;
1116 } catch (Error x) {
1117 thrown = x; throw x;
1118 } catch (Throwable x) {
1119 thrown = x; throw new Error(x);
1120 } finally {
1121 afterExecute(task, thrown);
1122 }
1123 } finally {
1124 task = null;
1125 w.completedTasks++;
1126 w.unlock();
1127 }
1128 }
1129 completedAbruptly = false;
1130 } finally {
1131 processWorkerExit(w, completedAbruptly);
1132 }
1133 }
1134
1135 // Public constructors and methods
1136
1137 /**
1138 * Creates a new {@code ThreadPoolExecutor} with the given initial
1139 * parameters, the default thread factory and the default rejected
1140 * execution handler.
1141 *
1142 * <p>It may be more convenient to use one of the {@link Executors}
1143 * factory methods instead of this general purpose constructor.
1144 *
1145 * @param corePoolSize the number of threads to keep in the pool, even
1146 * if they are idle, unless {@code allowCoreThreadTimeOut} is set
1147 * @param maximumPoolSize the maximum number of threads to allow in the
1148 * pool
1149 * @param keepAliveTime when the number of threads is greater than
1150 * the core, this is the maximum time that excess idle threads
1151 * will wait for new tasks before terminating.
1152 * @param unit the time unit for the {@code keepAliveTime} argument
1153 * @param workQueue the queue to use for holding tasks before they are
1154 * executed. This queue will hold only the {@code Runnable}
1155 * tasks submitted by the {@code execute} method.
1156 * @throws IllegalArgumentException if one of the following holds:<br>
1157 * {@code corePoolSize < 0}<br>
1158 * {@code keepAliveTime < 0}<br>
1159 * {@code maximumPoolSize <= 0}<br>
1160 * {@code maximumPoolSize < corePoolSize}
1161 * @throws NullPointerException if {@code workQueue} is null
1162 */
1163 public ThreadPoolExecutor(int corePoolSize,
1164 int maximumPoolSize,
1165 long keepAliveTime,
1166 TimeUnit unit,
1167 BlockingQueue<Runnable> workQueue) {
1168 this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue,
1169 Executors.defaultThreadFactory(), defaultHandler);
1170 }
1171
1172 /**
1173 * Creates a new {@code ThreadPoolExecutor} with the given initial
1174 * parameters and {@linkplain ThreadPoolExecutor.AbortPolicy
1175 * default rejected execution handler}.
1176 *
1177 * @param corePoolSize the number of threads to keep in the pool, even
1178 * if they are idle, unless {@code allowCoreThreadTimeOut} is set
1179 * @param maximumPoolSize the maximum number of threads to allow in the
1180 * pool
1181 * @param keepAliveTime when the number of threads is greater than
1182 * the core, this is the maximum time that excess idle threads
1183 * will wait for new tasks before terminating.
1184 * @param unit the time unit for the {@code keepAliveTime} argument
1185 * @param workQueue the queue to use for holding tasks before they are
1186 * executed. This queue will hold only the {@code Runnable}
1187 * tasks submitted by the {@code execute} method.
1188 * @param threadFactory the factory to use when the executor
1189 * creates a new thread
1190 * @throws IllegalArgumentException if one of the following holds:<br>
1191 * {@code corePoolSize < 0}<br>
1192 * {@code keepAliveTime < 0}<br>
1193 * {@code maximumPoolSize <= 0}<br>
1194 * {@code maximumPoolSize < corePoolSize}
1195 * @throws NullPointerException if {@code workQueue}
1196 * or {@code threadFactory} is null
1197 */
1198 public ThreadPoolExecutor(int corePoolSize,
1199 int maximumPoolSize,
1200 long keepAliveTime,
1201 TimeUnit unit,
1202 BlockingQueue<Runnable> workQueue,
1203 ThreadFactory threadFactory) {
1204 this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue,
1205 threadFactory, defaultHandler);
1206 }
1207
1208 /**
1209 * Creates a new {@code ThreadPoolExecutor} with the given initial
1210 * parameters and
1211 * {@linkplain Executors#defaultThreadFactory default thread factory}.
1212 *
1213 * @param corePoolSize the number of threads to keep in the pool, even
1214 * if they are idle, unless {@code allowCoreThreadTimeOut} is set
1215 * @param maximumPoolSize the maximum number of threads to allow in the
1216 * pool
1217 * @param keepAliveTime when the number of threads is greater than
1218 * the core, this is the maximum time that excess idle threads
1219 * will wait for new tasks before terminating.
1220 * @param unit the time unit for the {@code keepAliveTime} argument
1221 * @param workQueue the queue to use for holding tasks before they are
1222 * executed. This queue will hold only the {@code Runnable}
1223 * tasks submitted by the {@code execute} method.
1224 * @param handler the handler to use when execution is blocked
1225 * because the thread bounds and queue capacities are reached
1226 * @throws IllegalArgumentException if one of the following holds:<br>
1227 * {@code corePoolSize < 0}<br>
1228 * {@code keepAliveTime < 0}<br>
1229 * {@code maximumPoolSize <= 0}<br>
1230 * {@code maximumPoolSize < corePoolSize}
1231 * @throws NullPointerException if {@code workQueue}
1232 * or {@code handler} is null
1233 */
1234 public ThreadPoolExecutor(int corePoolSize,
1235 int maximumPoolSize,
1236 long keepAliveTime,
1237 TimeUnit unit,
1238 BlockingQueue<Runnable> workQueue,
1239 RejectedExecutionHandler handler) {
1240 this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue,
1241 Executors.defaultThreadFactory(), handler);
1242 }
1243
1244 /**
1245 * Creates a new {@code ThreadPoolExecutor} with the given initial
1246 * parameters.
1247 *
1248 * @param corePoolSize the number of threads to keep in the pool, even
1249 * if they are idle, unless {@code allowCoreThreadTimeOut} is set
1250 * @param maximumPoolSize the maximum number of threads to allow in the
1251 * pool
1252 * @param keepAliveTime when the number of threads is greater than
1253 * the core, this is the maximum time that excess idle threads
1254 * will wait for new tasks before terminating.
1255 * @param unit the time unit for the {@code keepAliveTime} argument
1256 * @param workQueue the queue to use for holding tasks before they are
1257 * executed. This queue will hold only the {@code Runnable}
1258 * tasks submitted by the {@code execute} method.
1259 * @param threadFactory the factory to use when the executor
1260 * creates a new thread
1261 * @param handler the handler to use when execution is blocked
1262 * because the thread bounds and queue capacities are reached
1263 * @throws IllegalArgumentException if one of the following holds:<br>
1264 * {@code corePoolSize < 0}<br>
1265 * {@code keepAliveTime < 0}<br>
1266 * {@code maximumPoolSize <= 0}<br>
1267 * {@code maximumPoolSize < corePoolSize}
1268 * @throws NullPointerException if {@code workQueue}
1269 * or {@code threadFactory} or {@code handler} is null
1270 */
1271 public ThreadPoolExecutor(int corePoolSize,
1272 int maximumPoolSize,
1273 long keepAliveTime,
1274 TimeUnit unit,
1275 BlockingQueue<Runnable> workQueue,
1276 ThreadFactory threadFactory,
1277 RejectedExecutionHandler handler) {
1278 if (corePoolSize < 0 ||
1279 maximumPoolSize <= 0 ||
1280 maximumPoolSize < corePoolSize ||
1281 keepAliveTime < 0)
1282 throw new IllegalArgumentException();
1283 if (workQueue == null || threadFactory == null || handler == null)
1284 throw new NullPointerException();
1285 this.acc = (System.getSecurityManager() == null)
1286 ? null
1287 : AccessController.getContext();
1288 this.corePoolSize = corePoolSize;
1289 this.maximumPoolSize = maximumPoolSize;
1290 this.workQueue = workQueue;
1291 this.keepAliveTime = unit.toNanos(keepAliveTime);
1292 this.threadFactory = threadFactory;
1293 this.handler = handler;
1294 }
1295
1296 /**
1297 * Executes the given task sometime in the future. The task
1298 * may execute in a new thread or in an existing pooled thread.
1299 *
1300 * If the task cannot be submitted for execution, either because this
1301 * executor has been shut down or because its capacity has been reached,
1302 * the task is handled by the current {@link RejectedExecutionHandler}.
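 *
 * <p>For example (a sketch; {@code handle} and {@code request} are
 * hypothetical application code):
 * <pre> {@code
 * pool.execute(() -> handle(request));}</pre>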
1303 *
1304 * @param command the task to execute
1305 * @throws RejectedExecutionException at discretion of
1306 * {@code RejectedExecutionHandler}, if the task
1307 * cannot be accepted for execution
1308 * @throws NullPointerException if {@code command} is null
1309 */
1310 public void execute(Runnable command) {
1311 if (command == null)
1312 throw new NullPointerException();
1313 /*
1314 * Proceed in 3 steps:
1315 *
1316 * 1. If fewer than corePoolSize threads are running, try to
1317 * start a new thread with the given command as its first
1318 * task. The call to addWorker atomically checks runState and
1319 * workerCount, and so prevents false alarms that would add
1320 * threads when it shouldn't, by returning false.
1321 *
1322 * 2. If a task can be successfully queued, then we still need
1323 * to double-check whether we should have added a thread
1324 * (because existing ones died since last checking) or that
1325 * the pool shut down since entry into this method. So we
1326 * recheck state and if necessary roll back the enqueuing if
1327 * stopped, or start a new thread if there are none.
1328 *
1329 * 3. If we cannot queue task, then we try to add a new
1330 * thread. If it fails, we know we are shut down or saturated
1331 * and so reject the task.
1332 */
1333 int c = ctl.get();
1334 if (workerCountOf(c) < corePoolSize) {
1335 if (addWorker(command, true))
1336 return;
1337 c = ctl.get();
1338 }
1339 if (isRunning(c) && workQueue.offer(command)) {
1340 int recheck = ctl.get();
1341 if (! isRunning(recheck) && remove(command))
1342 reject(command);
1343 else if (workerCountOf(recheck) == 0)
1344 addWorker(null, false);
1345 }
1346 else if (!addWorker(command, false))
1347 reject(command);
1348 }
1349
1350 /**
1351 * Initiates an orderly shutdown in which previously submitted
1352 * tasks are executed, but no new tasks will be accepted.
1353 * Invocation has no additional effect if already shut down.
1354 *
1355 * <p>This method does not wait for previously submitted tasks to
1356 * complete execution. Use {@link #awaitTermination awaitTermination}
1357 * to do that.
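 *
 * <p>A common idiom (a sketch, adapted from the {@link ExecutorService}
 * documentation) disables new submissions and then waits a bounded
 * time before forcing termination:
 *
 * <pre> {@code
 * pool.shutdown();
 * try {
 *   if (!pool.awaitTermination(60, TimeUnit.SECONDS))
 *     pool.shutdownNow();
 * } catch (InterruptedException ex) {
 *   pool.shutdownNow();
 *   Thread.currentThread().interrupt();
 * }}</pre>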
1358 *
1359 * @throws SecurityException {@inheritDoc}
1360 */
1361 public void shutdown() {
1362 final ReentrantLock mainLock = this.mainLock;
1363 mainLock.lock();
1364 try {
1365 checkShutdownAccess();
1366 advanceRunState(SHUTDOWN);
1367 interruptIdleWorkers();
1368 onShutdown(); // hook for ScheduledThreadPoolExecutor
1369 } finally {
1370 mainLock.unlock();
1371 }
1372 tryTerminate();
1373 }
1374
1375 /**
1376 * Attempts to stop all actively executing tasks, halts the
1377 * processing of waiting tasks, and returns a list of the tasks
1378 * that were awaiting execution. These tasks are drained (removed)
1379 * from the task queue upon return from this method.
1380 *
1381 * <p>This method does not wait for actively executing tasks to
1382 * terminate. Use {@link #awaitTermination awaitTermination} to
1383 * do that.
1384 *
1385 * <p>There are no guarantees beyond best-effort attempts to stop
1386 * processing actively executing tasks. This implementation
1387 * interrupts tasks via {@link Thread#interrupt}; any task that
1388 * fails to respond to interrupts may never terminate.
1389 *
1390 * @throws SecurityException {@inheritDoc}
1391 */
1392 public List<Runnable> shutdownNow() {
1393 List<Runnable> tasks;
1394 final ReentrantLock mainLock = this.mainLock;
1395 mainLock.lock();
1396 try {
1397 checkShutdownAccess();
1398 advanceRunState(STOP);
1399 interruptWorkers();
1400 tasks = drainQueue();
1401 } finally {
1402 mainLock.unlock();
1403 }
1404 tryTerminate();
1405 return tasks;
1406 }
1407
1408 public boolean isShutdown() {
1409 return runStateAtLeast(ctl.get(), SHUTDOWN);
1410 }
1411
1412 /** Used by ScheduledThreadPoolExecutor. */
1413 boolean isStopped() {
1414 return runStateAtLeast(ctl.get(), STOP);
1415 }
1416
1417 /**
1418 * Returns true if this executor is in the process of terminating
1419 * after {@link #shutdown} or {@link #shutdownNow} but has not
1420 * completely terminated. This method may be useful for
1421 * debugging. A return of {@code true} reported a sufficient
1422 * period after shutdown may indicate that submitted tasks have
1423 * ignored or suppressed interruption, causing this executor not
1424 * to properly terminate.
1425 *
1426 * @return {@code true} if terminating but not yet terminated
1427 */
1428 public boolean isTerminating() {
1429 int c = ctl.get();
1430 return runStateAtLeast(c, SHUTDOWN) && runStateLessThan(c, TERMINATED);
1431 }
1432
1433 public boolean isTerminated() {
1434 return runStateAtLeast(ctl.get(), TERMINATED);
1435 }
1436
1437 public boolean awaitTermination(long timeout, TimeUnit unit)
1438 throws InterruptedException {
1439 long nanos = unit.toNanos(timeout);
1440 final ReentrantLock mainLock = this.mainLock;
1441 mainLock.lock();
1442 try {
1443 while (runStateLessThan(ctl.get(), TERMINATED)) {
1444 if (nanos <= 0L)
1445 return false;
1446 nanos = termination.awaitNanos(nanos);
1447 }
1448 return true;
1449 } finally {
1450 mainLock.unlock();
1451 }
1452 }
1453
1454 /**
1455 * Invokes {@code shutdown} when this executor is no longer
1456 * referenced and it has no threads.
1457 *
1458 * <p>This method is invoked with privileges that are restricted by
1459 * the security context of the caller that invokes the constructor.
1460 *
1461 * @deprecated The {@code finalize} method has been deprecated.
1462 * Subclasses that override {@code finalize} in order to perform cleanup
1463 * should be modified to use alternative cleanup mechanisms and
1464 * to remove the overriding {@code finalize} method.
1465 * When overriding the {@code finalize} method, its implementation must explicitly
1466 * ensure that {@code super.finalize()} is invoked as described in {@link Object#finalize}.
1467 * See the specification for {@link Object#finalize()} for further
1468 * information about migration options.
1469 */
1470 @Deprecated(since="9")
1471 protected void finalize() {
1472 SecurityManager sm = System.getSecurityManager();
1473 if (sm == null || acc == null) {
1474 shutdown();
1475 } else {
1476 PrivilegedAction<Void> pa = () -> { shutdown(); return null; };
1477 AccessController.doPrivileged(pa, acc);
1478 }
1479 }
1480
1481 /**
1482 * Sets the thread factory used to create new threads.
1483 *
1484 * @param threadFactory the new thread factory
1485 * @throws NullPointerException if threadFactory is null
1486 * @see #getThreadFactory
1487 */
1488 public void setThreadFactory(ThreadFactory threadFactory) {
1489 if (threadFactory == null)
1490 throw new NullPointerException();
1491 this.threadFactory = threadFactory;
1492 }
1493
1494 /**
1495 * Returns the thread factory used to create new threads.
1496 *
1497 * @return the current thread factory
1498 * @see #setThreadFactory(ThreadFactory)
1499 */
1500 public ThreadFactory getThreadFactory() {
1501 return threadFactory;
1502 }
1503
1504 /**
1505 * Sets a new handler for unexecutable tasks.
1506 *
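 * <p>For example, given an existing pool, saturation handling can be
 * switched to one of the predefined policies (a sketch, not a
 * recommendation for any particular workload):
 *
 * <pre> {@code
 * pool.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());}</pre>
 *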
1507 * @param handler the new handler
1508 * @throws NullPointerException if handler is null
1509 * @see #getRejectedExecutionHandler
1510 */
1511 public void setRejectedExecutionHandler(RejectedExecutionHandler handler) {
1512 if (handler == null)
1513 throw new NullPointerException();
1514 this.handler = handler;
1515 }
1516
1517 /**
1518 * Returns the current handler for unexecutable tasks.
1519 *
1520 * @return the current handler
1521 * @see #setRejectedExecutionHandler(RejectedExecutionHandler)
1522 */
1523 public RejectedExecutionHandler getRejectedExecutionHandler() {
1524 return handler;
1525 }
1526
1527 /**
1528 * Sets the core number of threads. This overrides any value set
1529 * in the constructor. If the new value is smaller than the
1530 * current value, excess existing threads will be terminated when
1531 * they next become idle. If larger, new threads will, if needed,
1532 * be started to execute any queued tasks.
1533 *
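 * <p>A sketch of growing a pool at runtime; the order of the two calls
 * matters because each setter validates the new value against the other
 * bound (the sizes are illustrative):
 *
 * <pre> {@code
 * void grow(ThreadPoolExecutor pool, int newCore, int newMax) {
 *   pool.setMaximumPoolSize(newMax); // raise the ceiling first
 *   pool.setCorePoolSize(newCore);   // may prestart workers for queued tasks
 * }}</pre>
 *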
1534 * @param corePoolSize the new core size
1535 * @throws IllegalArgumentException if {@code corePoolSize < 0}
1536 * or {@code corePoolSize} is greater than the {@linkplain
1537 * #getMaximumPoolSize() maximum pool size}
1538 * @see #getCorePoolSize
1539 */
1540 public void setCorePoolSize(int corePoolSize) {
1541 if (corePoolSize < 0 || maximumPoolSize < corePoolSize)
1542 throw new IllegalArgumentException();
1543 int delta = corePoolSize - this.corePoolSize;
1544 this.corePoolSize = corePoolSize;
1545 if (workerCountOf(ctl.get()) > corePoolSize)
1546 interruptIdleWorkers();
1547 else if (delta > 0) {
1548 // We don't really know how many new threads are "needed".
1549 // As a heuristic, prestart enough new workers (up to new
1550 // core size) to handle the current number of tasks in
1551 // queue, but stop if queue becomes empty while doing so.
1552 int k = Math.min(delta, workQueue.size());
1553 while (k-- > 0 && addWorker(null, true)) {
1554 if (workQueue.isEmpty())
1555 break;
1556 }
1557 }
1558 }
1559
1560 /**
1561 * Returns the core number of threads.
1562 *
1563 * @return the core number of threads
1564 * @see #setCorePoolSize
1565 */
1566 public int getCorePoolSize() {
1567 return corePoolSize;
1568 }
1569
1570 /**
1571 * Starts a core thread, causing it to idly wait for work. This
1572 * overrides the default policy of starting core threads only when
1573 * new tasks are executed. This method will return {@code false}
1574 * if all core threads have already been started.
1575 *
1576 * @return {@code true} if a thread was started
1577 */
1578 public boolean prestartCoreThread() {
1579 return workerCountOf(ctl.get()) < corePoolSize &&
1580 addWorker(null, true);
1581 }
1582
1583 /**
1584 * Same as prestartCoreThread except that it arranges that at least
1585 * one thread is started even if corePoolSize is 0.
1586 */
1587 void ensurePrestart() {
1588 int wc = workerCountOf(ctl.get());
1589 if (wc < corePoolSize)
1590 addWorker(null, true);
1591 else if (wc == 0)
1592 addWorker(null, false);
1593 }
1594
1595 /**
1596 * Starts all core threads, causing them to idly wait for work. This
1597 * overrides the default policy of starting core threads only when
1598 * new tasks are executed.
1599 *
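 * <p>A sketch of warming up a fixed-size pool right after construction so
 * that the first submissions do not pay thread-creation latency (the
 * sizes and queue choice are illustrative):
 *
 * <pre> {@code
 * ThreadPoolExecutor pool = new ThreadPoolExecutor(
 *     4, 4, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>());
 * int started = pool.prestartAllCoreThreads(); // normally 4 here
 * }</pre>
 *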
1600 * @return the number of threads started
1601 */
1602 public int prestartAllCoreThreads() {
1603 int n = 0;
1604 while (addWorker(null, true))
1605 ++n;
1606 return n;
1607 }
1608
1609 /**
1610 * Returns true if this pool allows core threads to time out and
1611 * terminate if no tasks arrive within the keep-alive time, being
1612 * replaced if needed when new tasks arrive. When true, the same
1613 * keep-alive policy applying to non-core threads applies also to
1614 * core threads. When false (the default), core threads are never
1615 * terminated due to lack of incoming tasks.
1616 *
1617 * @return {@code true} if core threads are allowed to time out,
1618 * else {@code false}
1619 *
1620 * @since 1.6
1621 */
1622 public boolean allowsCoreThreadTimeOut() {
1623 return allowCoreThreadTimeOut;
1624 }
1625
1626 /**
1627 * Sets the policy governing whether core threads may time out and
1628 * terminate if no tasks arrive within the keep-alive time, being
1629 * replaced if needed when new tasks arrive. When false, core
1630 * threads are never terminated due to lack of incoming
1631 * tasks. When true, the same keep-alive policy applying to
1632 * non-core threads applies also to core threads. To avoid
1633 * continual thread replacement, the keep-alive time must be
1634 * greater than zero when setting {@code true}. This method
1635 * should in general be called before the pool is actively used.
1636 *
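 * <p>A sketch of a pool whose threads, core threads included, all exit
 * after 30 seconds of idleness (the parameters are illustrative):
 *
 * <pre> {@code
 * ThreadPoolExecutor pool = new ThreadPoolExecutor(
 *     2, 8, 30L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
 * pool.allowCoreThreadTimeOut(true); // requires a keep-alive time greater than zero
 * }</pre>
 *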
1637 * @param value {@code true} if should time out, else {@code false}
1638 * @throws IllegalArgumentException if value is {@code true}
1639 * and the current keep-alive time is not greater than zero
1640 *
1641 * @since 1.6
1642 */
1643 public void allowCoreThreadTimeOut(boolean value) {
1644 if (value && keepAliveTime <= 0)
1645 throw new IllegalArgumentException("Core threads must have nonzero keep alive times");
1646 if (value != allowCoreThreadTimeOut) {
1647 allowCoreThreadTimeOut = value;
1648 if (value)
1649 interruptIdleWorkers();
1650 }
1651 }
1652
1653 /**
1654 * Sets the maximum allowed number of threads. This overrides any
1655 * value set in the constructor. If the new value is smaller than
1656 * the current value, excess existing threads will be
1657 * terminated when they next become idle.
1658 *
1659 * @param maximumPoolSize the new maximum
1660 * @throws IllegalArgumentException if the new maximum is
1661 * less than or equal to zero, or
1662 * less than the {@linkplain #getCorePoolSize core pool size}
1663 * @see #getMaximumPoolSize
1664 */
1665 public void setMaximumPoolSize(int maximumPoolSize) {
1666 if (maximumPoolSize <= 0 || maximumPoolSize < corePoolSize)
1667 throw new IllegalArgumentException();
1668 this.maximumPoolSize = maximumPoolSize;
1669 if (workerCountOf(ctl.get()) > maximumPoolSize)
1670 interruptIdleWorkers();
1671 }
1672
1673 /**
1674 * Returns the maximum allowed number of threads.
1675 *
1676 * @return the maximum allowed number of threads
1677 * @see #setMaximumPoolSize
1678 */
1679 public int getMaximumPoolSize() {
1680 return maximumPoolSize;
1681 }
1682
1683 /**
1684 * Sets the thread keep-alive time, which is the amount of time
1685 * that threads may remain idle before being terminated.
1686 * Threads that wait this amount of time without processing a
1687 * task will be terminated if there are more than the core
1688 * number of threads currently in the pool, or if this pool
1689 * {@linkplain #allowsCoreThreadTimeOut() allows core thread timeout}.
1690 * This overrides any value set in the constructor.
1691 *
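 * <p>For example, given an existing pool (the duration is illustrative):
 *
 * <pre> {@code
 * pool.setKeepAliveTime(5, TimeUnit.MINUTES); // idle excess threads exit after 5 minutes}</pre>
 *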
1692 * @param time the time to wait. A time value of zero will cause
1693 * excess threads to terminate immediately after executing tasks.
1694 * @param unit the time unit of the {@code time} argument
1695 * @throws IllegalArgumentException if {@code time} is less than zero,
1696 * or if {@code time} is zero and {@code allowsCoreThreadTimeOut()} returns {@code true}
1697 * @see #getKeepAliveTime(TimeUnit)
1698 */
1699 public void setKeepAliveTime(long time, TimeUnit unit) {
1700 if (time < 0)
1701 throw new IllegalArgumentException();
1702 if (time == 0 && allowsCoreThreadTimeOut())
1703 throw new IllegalArgumentException("Core threads must have nonzero keep alive times");
1704 long keepAliveTime = unit.toNanos(time);
1705 long delta = keepAliveTime - this.keepAliveTime;
1706 this.keepAliveTime = keepAliveTime;
1707 if (delta < 0)
1708 interruptIdleWorkers();
1709 }
1710
1711 /**
1712 * Returns the thread keep-alive time, which is the amount of time
1713 * that threads may remain idle before being terminated.
1714 * Threads that wait this amount of time without processing a
1715 * task will be terminated if there are more than the core
1716 * number of threads currently in the pool, or if this pool
1717 * {@linkplain #allowsCoreThreadTimeOut() allows core thread timeout}.
1718 *
1719 * @param unit the desired time unit of the result
1720 * @return the time limit
1721 * @see #setKeepAliveTime(long, TimeUnit)
1722 */
1723 public long getKeepAliveTime(TimeUnit unit) {
1724 return unit.convert(keepAliveTime, TimeUnit.NANOSECONDS);
1725 }
1726
1727 /* User-level queue utilities */
1728
1729 /**
1730 * Returns the task queue used by this executor. Access to the
1731 * task queue is intended primarily for debugging and monitoring.
1732 * This queue may be in active use. Retrieving the task queue
1733 * does not prevent queued tasks from executing.
1734 *
1735 * @return the task queue
1736 */
1737 public BlockingQueue<Runnable> getQueue() {
1738 return workQueue;
1739 }
1740
1741 /**
1742 * Removes this task from the executor's internal queue if it is
1743 * present, thus causing it not to be run if it has not already
1744 * started.
1745 *
1746 * <p>This method may be useful as one part of a cancellation
1747 * scheme. It may fail to remove tasks that have been converted
1748 * into other forms before being placed on the internal queue.
1749 * For example, a task entered using {@code submit} might be
1750 * converted into a form that maintains {@code Future} status.
1751 * However, in such cases, method {@link #purge} may be used to
1752 * remove those Futures that have been cancelled.
1753 *
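 * <p>A sketch of the resulting cancellation scheme: because {@code submit}
 * wraps the task, {@code remove(task)} would not match the queued element,
 * so cancel the returned {@code Future} and reclaim queue space with
 * {@link #purge}:
 *
 * <pre> {@code
 * void cancelAndReclaim(ThreadPoolExecutor pool, Runnable task) {
 *   Future<?> f = pool.submit(task); // the queued element is a wrapper, not task itself
 *   f.cancel(false);
 *   pool.purge();                    // drop the cancelled wrapper from the queue
 * }}</pre>
 *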
1754 * @param task the task to remove
1755 * @return {@code true} if the task was removed
1756 */
1757 public boolean remove(Runnable task) {
1758 boolean removed = workQueue.remove(task);
1759 tryTerminate(); // In case SHUTDOWN and now empty
1760 return removed;
1761 }
1762
1763 /**
1764 * Tries to remove from the work queue all {@link Future}
1765 * tasks that have been cancelled. This method can be useful as a
1766 * storage reclamation operation that has no other impact on
1767 * functionality. Cancelled tasks are never executed, but may
1768 * accumulate in work queues until worker threads can actively
1769 * remove them. Invoking this method instead tries to remove them now.
1770 * However, this method may fail to remove tasks in
1771 * the presence of interference by other threads.
1772 */
1773 public void purge() {
1774 final BlockingQueue<Runnable> q = workQueue;
1775 try {
1776 Iterator<Runnable> it = q.iterator();
1777 while (it.hasNext()) {
1778 Runnable r = it.next();
1779 if (r instanceof Future<?> && ((Future<?>)r).isCancelled())
1780 it.remove();
1781 }
1782 } catch (ConcurrentModificationException fallThrough) {
1783 // Take slow path if we encounter interference during traversal.
1784 // Make copy for traversal and call remove for cancelled entries.
1785 // The slow path is more likely to be O(N*N).
1786 for (Object r : q.toArray())
1787 if (r instanceof Future<?> && ((Future<?>)r).isCancelled())
1788 q.remove(r);
1789 }
1790
1791 tryTerminate(); // In case SHUTDOWN and now empty
1792 }
1793
1794 /* Statistics */
1795
1796 /**
1797 * Returns the current number of threads in the pool.
1798 *
1799 * @return the number of threads
1800 */
1801 public int getPoolSize() {
1802 final ReentrantLock mainLock = this.mainLock;
1803 mainLock.lock();
1804 try {
1805 // Remove rare and surprising possibility of
1806 // isTerminated() && getPoolSize() > 0
1807 return runStateAtLeast(ctl.get(), TIDYING) ? 0
1808 : workers.size();
1809 } finally {
1810 mainLock.unlock();
1811 }
1812 }
1813
1814 /**
1815 * Returns the approximate number of threads that are actively
1816 * executing tasks.
1817 *
1818 * @return the number of threads
1819 */
1820 public int getActiveCount() {
1821 final ReentrantLock mainLock = this.mainLock;
1822 mainLock.lock();
1823 try {
1824 int n = 0;
1825 for (Worker w : workers)
1826 if (w.isLocked())
1827 ++n;
1828 return n;
1829 } finally {
1830 mainLock.unlock();
1831 }
1832 }
1833
1834 /**
1835 * Returns the largest number of threads that have ever
1836 * simultaneously been in the pool.
1837 *
1838 * @return the number of threads
1839 */
1840 public int getLargestPoolSize() {
1841 final ReentrantLock mainLock = this.mainLock;
1842 mainLock.lock();
1843 try {
1844 return largestPoolSize;
1845 } finally {
1846 mainLock.unlock();
1847 }
1848 }
1849
1850 /**
1851 * Returns the approximate total number of tasks that have ever been
1852 * scheduled for execution. Because the states of tasks and
1853 * threads may change dynamically during computation, the returned
1854 * value is only an approximation.
1855 *
1856 * @return the number of tasks
1857 */
1858 public long getTaskCount() {
1859 final ReentrantLock mainLock = this.mainLock;
1860 mainLock.lock();
1861 try {
1862 long n = completedTaskCount;
1863 for (Worker w : workers) {
1864 n += w.completedTasks;
1865 if (w.isLocked())
1866 ++n;
1867 }
1868 return n + workQueue.size();
1869 } finally {
1870 mainLock.unlock();
1871 }
1872 }
1873
1874 /**
1875 * Returns the approximate total number of tasks that have
1876 * completed execution. Because the states of tasks and threads
1877 * may change dynamically during computation, the returned value
1878 * is only an approximation, but one that does not ever decrease
1879 * across successive calls.
1880 *
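 * <p>A monitoring sketch; the counts are approximations and are read
 * without a common lock, so they may be mutually inconsistent:
 *
 * <pre> {@code
 * void logStats(ThreadPoolExecutor pool) {
 *   System.out.println("active=" + pool.getActiveCount()
 *       + " completed=" + pool.getCompletedTaskCount()
 *       + " queued=" + pool.getQueue().size());
 * }}</pre>
 *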
1881 * @return the number of tasks
1882 */
1883 public long getCompletedTaskCount() {
1884 final ReentrantLock mainLock = this.mainLock;
1885 mainLock.lock();
1886 try {
1887 long n = completedTaskCount;
1888 for (Worker w : workers)
1889 n += w.completedTasks;
1890 return n;
1891 } finally {
1892 mainLock.unlock();
1893 }
1894 }
1895
1896 /**
1897 * Returns a string identifying this pool, as well as its state,
1898 * including indications of run state and estimated worker and
1899 * task counts.
1900 *
1901 * @return a string identifying this pool, as well as its state
1902 */
1903 public String toString() {
1904 long ncompleted;
1905 int nworkers, nactive;
1906 final ReentrantLock mainLock = this.mainLock;
1907 mainLock.lock();
1908 try {
1909 ncompleted = completedTaskCount;
1910 nactive = 0;
1911 nworkers = workers.size();
1912 for (Worker w : workers) {
1913 ncompleted += w.completedTasks;
1914 if (w.isLocked())
1915 ++nactive;
1916 }
1917 } finally {
1918 mainLock.unlock();
1919 }
1920 int c = ctl.get();
1921 String runState =
1922 isRunning(c) ? "Running" :
1923 runStateAtLeast(c, TERMINATED) ? "Terminated" :
1924 "Shutting down";
1925 return super.toString() +
1926 "[" + runState +
1927 ", pool size = " + nworkers +
1928 ", active threads = " + nactive +
1929 ", queued tasks = " + workQueue.size() +
1930 ", completed tasks = " + ncompleted +
1931 "]";
1932 }
1933
1934 /* Extension hooks */
1935
1936 /**
1937 * Method invoked prior to executing the given Runnable in the
1938 * given thread. This method is invoked by thread {@code t} that
1939 * will execute task {@code r}, and may be used to re-initialize
1940 * ThreadLocals, or to perform logging.
1941 *
1942 * <p>This implementation does nothing, but may be customized in
1943 * subclasses. Note: To properly nest multiple overridings, subclasses
1944 * should generally invoke {@code super.beforeExecute} at the end of
1945 * this method.
1946 *
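 * <p>An illustrative subclass (not part of this class) that pairs
 * {@code beforeExecute} with {@link #afterExecute afterExecute} to log
 * per-task elapsed time, following the nesting conventions above:
 *
 * <pre> {@code
 * class TimingThreadPool extends ThreadPoolExecutor {
 *   private final ThreadLocal<Long> startNanos = new ThreadLocal<>();
 *   TimingThreadPool(int core, int max, long keep, TimeUnit unit,
 *                    BlockingQueue<Runnable> queue) {
 *     super(core, max, keep, unit, queue);
 *   }
 *   protected void beforeExecute(Thread t, Runnable r) {
 *     startNanos.set(System.nanoTime());
 *     super.beforeExecute(t, r);      // invoke super at the end
 *   }
 *   protected void afterExecute(Runnable r, Throwable t) {
 *     super.afterExecute(r, t);       // invoke super at the beginning
 *     System.out.format("%s took %d ns%n", r, System.nanoTime() - startNanos.get());
 *   }
 * }}</pre>
 *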
1947 * @param t the thread that will run task {@code r}
1948 * @param r the task that will be executed
1949 */
1950 protected void beforeExecute(Thread t, Runnable r) { }
1951
1952 /**
1953 * Method invoked upon completion of execution of the given Runnable.
1954 * This method is invoked by the thread that executed the task. If
1955 * non-null, the Throwable is the uncaught {@code RuntimeException}
1956 * or {@code Error} that caused execution to terminate abruptly.
1957 *
1958 * <p>This implementation does nothing, but may be customized in
1959 * subclasses. Note: To properly nest multiple overridings, subclasses
1960 * should generally invoke {@code super.afterExecute} at the
1961 * beginning of this method.
1962 *
1963 * <p><b>Note:</b> When actions are enclosed in tasks (such as
1964 * {@link FutureTask}) either explicitly or via methods such as
1965 * {@code submit}, these task objects catch and maintain
1966 * computational exceptions, and so they do not cause abrupt
1967 * termination, and the internal exceptions are <em>not</em>
1968 * passed to this method. If you would like to trap both kinds of
1969 * failures in this method, you can further probe for such cases,
1970 * as in this sample subclass that prints either the direct cause
1971 * or the underlying exception if a task has been aborted:
1972 *
1973 * <pre> {@code
1974 * class ExtendedExecutor extends ThreadPoolExecutor {
1975 * // ...
1976 * protected void afterExecute(Runnable r, Throwable t) {
1977 * super.afterExecute(r, t);
1978 * if (t == null
1979 * && r instanceof Future<?>
1980 * && ((Future<?>)r).isDone()) {
1981 * try {
1982 * Object result = ((Future<?>) r).get();
1983 * } catch (CancellationException ce) {
1984 * t = ce;
1985 * } catch (ExecutionException ee) {
1986 * t = ee.getCause();
1987 * } catch (InterruptedException ie) {
1988 * // ignore/reset
1989 * Thread.currentThread().interrupt();
1990 * }
1991 * }
1992 * if (t != null)
1993 * System.out.println(t);
1994 * }
1995 * }}</pre>
1996 *
1997 * @param r the runnable that has completed
1998 * @param t the exception that caused termination, or null if
1999 * execution completed normally
2000 */
2001 protected void afterExecute(Runnable r, Throwable t) { }
2002
2003 /**
2004 * Method invoked when the Executor has terminated. Default
2005 * implementation does nothing. Note: To properly nest multiple
2006 * overridings, subclasses should generally invoke
2007 * {@code super.terminated} within this method.
2008 */
2009 protected void terminated() { }
2010
2011 /* Predefined RejectedExecutionHandlers */
2012
2013 /**
2014 * A handler for rejected tasks that runs the rejected task
2015 * directly in the calling thread of the {@code execute} method,
2016 * unless the executor has been shut down, in which case the task
2017 * is discarded.
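 *
 * <p>For example, a bounded pool that applies back-pressure by running
 * overflow tasks in the submitting thread (the sizes are illustrative):
 *
 * <pre> {@code
 * ThreadPoolExecutor pool = new ThreadPoolExecutor(
 *     4, 8, 60L, TimeUnit.SECONDS,
 *     new ArrayBlockingQueue<>(64),
 *     new ThreadPoolExecutor.CallerRunsPolicy());}</pre>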
2018 */
2019 public static class CallerRunsPolicy implements RejectedExecutionHandler {
2020 /**
2021 * Creates a {@code CallerRunsPolicy}.
2022 */
2023 public CallerRunsPolicy() { }
2024
2025 /**
2026 * Executes task r in the caller's thread, unless the executor
2027 * has been shut down, in which case the task is discarded.
2028 *
2029 * @param r the runnable task requested to be executed
2030 * @param e the executor attempting to execute this task
2031 */
2032 public void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
2033 if (!e.isShutdown()) {
2034 r.run();
2035 }
2036 }
2037 }
2038
2039 /**
2040 * A handler for rejected tasks that throws a
2041 * {@link RejectedExecutionException}.
2042 *
2043 * This is the default handler for {@link ThreadPoolExecutor} and
2044 * {@link ScheduledThreadPoolExecutor}.
2045 */
2046 public static class AbortPolicy implements RejectedExecutionHandler {
2047 /**
2048 * Creates an {@code AbortPolicy}.
2049 */
2050 public AbortPolicy() { }
2051
2052 /**
2053 * Always throws RejectedExecutionException.
2054 *
2055 * @param r the runnable task requested to be executed
2056 * @param e the executor attempting to execute this task
2057 * @throws RejectedExecutionException always
2058 */
2059 public void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
2060 throw new RejectedExecutionException("Task " + r.toString() +
2061 " rejected from " +
2062 e.toString());
2063 }
2064 }
2065
2066 /**
2067 * A handler for rejected tasks that silently discards the
2068 * rejected task.
2069 */
2070 public static class DiscardPolicy implements RejectedExecutionHandler {
2071 /**
2072 * Creates a {@code DiscardPolicy}.
2073 */
2074 public DiscardPolicy() { }
2075
2076 /**
2077 * Does nothing, which has the effect of discarding task r.
2078 *
2079 * @param r the runnable task requested to be executed
2080 * @param e the executor attempting to execute this task
2081 */
2082 public void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
2083 }
2084 }
2085
2086 /**
2087 * A handler for rejected tasks that discards the oldest unhandled
2088 * request and then retries {@code execute}, unless the executor
2089 * is shut down, in which case the task is discarded.
2090 */
2091 public static class DiscardOldestPolicy implements RejectedExecutionHandler {
2092 /**
2093 * Creates a {@code DiscardOldestPolicy}.
2094 */
2095 public DiscardOldestPolicy() { }
2096
2097 /**
2098 * Obtains and ignores the next task that the executor
2099 * would otherwise execute, if one is immediately available,
2100 * and then retries execution of task r, unless the executor
2101 * is shut down, in which case task r is instead discarded.
2102 *
2103 * @param r the runnable task requested to be executed
2104 * @param e the executor attempting to execute this task
2105 */
2106 public void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
2107 if (!e.isShutdown()) {
2108 e.getQueue().poll();
2109 e.execute(r);
2110 }
2111 }
2112 }
2113 }