1 |
/* |
2 |
* Written by Doug Lea with assistance from members of JCP JSR-166 |
3 |
* Expert Group and released to the public domain, as explained at |
4 |
* http://creativecommons.org/publicdomain/zero/1.0/ |
5 |
*/ |
6 |
|
7 |
package java.util.concurrent; |
8 |
|
9 |
import java.security.AccessControlContext; |
10 |
import java.security.AccessController; |
11 |
import java.security.PrivilegedAction; |
12 |
import java.util.ArrayList; |
13 |
import java.util.ConcurrentModificationException; |
14 |
import java.util.HashSet; |
15 |
import java.util.Iterator; |
16 |
import java.util.List; |
17 |
import java.util.concurrent.atomic.AtomicInteger; |
18 |
import java.util.concurrent.locks.AbstractQueuedSynchronizer; |
19 |
import java.util.concurrent.locks.Condition; |
20 |
import java.util.concurrent.locks.ReentrantLock; |
21 |
|
22 |
/** |
23 |
* An {@link ExecutorService} that executes each submitted task using |
24 |
* one of possibly several pooled threads, normally configured |
25 |
* using {@link Executors} factory methods. |
26 |
* |
27 |
* <p>Thread pools address two different problems: they usually |
28 |
* provide improved performance when executing large numbers of |
29 |
* asynchronous tasks, due to reduced per-task invocation overhead, |
30 |
* and they provide a means of bounding and managing the resources, |
31 |
* including threads, consumed when executing a collection of tasks. |
32 |
* Each {@code ThreadPoolExecutor} also maintains some basic |
33 |
* statistics, such as the number of completed tasks. |
34 |
* |
35 |
* <p>To be useful across a wide range of contexts, this class |
36 |
* provides many adjustable parameters and extensibility |
37 |
* hooks. However, programmers are urged to use the more convenient |
38 |
* {@link Executors} factory methods {@link |
39 |
* Executors#newCachedThreadPool} (unbounded thread pool, with |
40 |
* automatic thread reclamation), {@link Executors#newFixedThreadPool} |
41 |
* (fixed size thread pool) and {@link |
42 |
* Executors#newSingleThreadExecutor} (single background thread), that |
43 |
* preconfigure settings for the most common usage |
44 |
* scenarios. Otherwise, use the following guide when manually |
45 |
* configuring and tuning this class: |
46 |
* |
47 |
* <dl> |
48 |
* |
49 |
* <dt>Core and maximum pool sizes</dt> |
50 |
* |
51 |
* <dd>A {@code ThreadPoolExecutor} will automatically adjust the |
52 |
* pool size (see {@link #getPoolSize}) |
53 |
* according to the bounds set by |
54 |
* corePoolSize (see {@link #getCorePoolSize}) and |
55 |
* maximumPoolSize (see {@link #getMaximumPoolSize}). |
56 |
* |
57 |
* When a new task is submitted in method {@link #execute(Runnable)}, |
58 |
* if fewer than corePoolSize threads are running, a new thread is |
59 |
* created to handle the request, even if other worker threads are |
60 |
* idle. Else if fewer than maximumPoolSize threads are running, a |
61 |
* new thread will be created to handle the request only if the queue |
62 |
* is full. By setting corePoolSize and maximumPoolSize the same, you |
63 |
* create a fixed-size thread pool. By setting maximumPoolSize to an |
64 |
* essentially unbounded value such as {@code Integer.MAX_VALUE}, you |
65 |
* allow the pool to accommodate an arbitrary number of concurrent |
66 |
* tasks. Most typically, core and maximum pool sizes are set only |
67 |
* upon construction, but they may also be changed dynamically using |
68 |
* {@link #setCorePoolSize} and {@link #setMaximumPoolSize}. </dd> |
69 |
* |
70 |
* <dt>On-demand construction</dt> |
71 |
* |
72 |
* <dd>By default, even core threads are initially created and |
73 |
* started only when new tasks arrive, but this can be overridden |
74 |
* dynamically using method {@link #prestartCoreThread} or {@link |
75 |
* #prestartAllCoreThreads}. You probably want to prestart threads if |
76 |
* you construct the pool with a non-empty queue. </dd> |
77 |
* |
78 |
* <dt>Creating new threads</dt> |
79 |
* |
80 |
* <dd>New threads are created using a {@link ThreadFactory}. If not |
81 |
* otherwise specified, a {@link Executors#defaultThreadFactory} is |
82 |
* used, that creates threads to all be in the same {@link |
83 |
* ThreadGroup} and with the same {@code NORM_PRIORITY} priority and |
84 |
* non-daemon status. By supplying a different ThreadFactory, you can |
85 |
* alter the thread's name, thread group, priority, daemon status, |
86 |
* etc. If a {@code ThreadFactory} fails to create a thread when asked |
87 |
* by returning null from {@code newThread}, the executor will |
88 |
* continue, but might not be able to execute any tasks. Threads |
89 |
* should possess the "modifyThread" {@code RuntimePermission}. If |
90 |
* worker threads or other threads using the pool do not possess this |
91 |
* permission, service may be degraded: configuration changes may not |
92 |
* take effect in a timely manner, and a shutdown pool may remain in a |
93 |
* state in which termination is possible but not completed.</dd> |
94 |
* |
95 |
* <dt>Keep-alive times</dt> |
96 |
* |
97 |
* <dd>If the pool currently has more than corePoolSize threads, |
98 |
* excess threads will be terminated if they have been idle for more |
99 |
* than the keepAliveTime (see {@link #getKeepAliveTime(TimeUnit)}). |
100 |
* This provides a means of reducing resource consumption when the |
101 |
* pool is not being actively used. If the pool becomes more active |
102 |
* later, new threads will be constructed. This parameter can also be |
103 |
* changed dynamically using method {@link #setKeepAliveTime(long, |
104 |
* TimeUnit)}. Using a value of {@code Long.MAX_VALUE} {@link |
105 |
* TimeUnit#NANOSECONDS} effectively disables idle threads from ever |
106 |
* terminating prior to shut down. By default, the keep-alive policy |
107 |
* applies only when there are more than corePoolSize threads, but |
108 |
* method {@link #allowCoreThreadTimeOut(boolean)} can be used to |
109 |
* apply this time-out policy to core threads as well, so long as the |
110 |
* keepAliveTime value is non-zero. </dd> |
111 |
* |
112 |
* <dt>Queuing</dt> |
113 |
* |
114 |
* <dd>Any {@link BlockingQueue} may be used to transfer and hold |
115 |
* submitted tasks. The use of this queue interacts with pool sizing: |
116 |
* |
117 |
* <ul> |
118 |
* |
119 |
* <li>If fewer than corePoolSize threads are running, the Executor |
120 |
* always prefers adding a new thread |
121 |
* rather than queuing. |
122 |
* |
123 |
* <li>If corePoolSize or more threads are running, the Executor |
124 |
* always prefers queuing a request rather than adding a new |
125 |
* thread. |
126 |
* |
127 |
* <li>If a request cannot be queued, a new thread is created unless |
128 |
* this would exceed maximumPoolSize, in which case, the task will be |
129 |
* rejected. |
130 |
* |
131 |
* </ul> |
132 |
* |
133 |
* There are three general strategies for queuing: |
134 |
* <ol> |
135 |
* |
136 |
* <li><em> Direct handoffs.</em> A good default choice for a work |
137 |
* queue is a {@link SynchronousQueue} that hands off tasks to threads |
138 |
* without otherwise holding them. Here, an attempt to queue a task |
139 |
* will fail if no threads are immediately available to run it, so a |
140 |
* new thread will be constructed. This policy avoids lockups when |
141 |
* handling sets of requests that might have internal dependencies. |
142 |
* Direct handoffs generally require unbounded maximumPoolSizes to |
143 |
* avoid rejection of newly submitted tasks. This in turn admits the |
144 |
* possibility of unbounded thread growth when commands continue to |
145 |
* arrive on average faster than they can be processed. |
146 |
* |
147 |
* <li><em> Unbounded queues.</em> Using an unbounded queue (for |
148 |
* example a {@link LinkedBlockingQueue} without a predefined |
149 |
* capacity) will cause new tasks to wait in the queue when all |
150 |
* corePoolSize threads are busy. Thus, no more than corePoolSize |
151 |
* threads will ever be created. (And the value of the maximumPoolSize |
152 |
* therefore doesn't have any effect.) This may be appropriate when |
153 |
* each task is completely independent of others, so tasks cannot |
154 |
* affect each other's execution; for example, in a web page server. |
155 |
* While this style of queuing can be useful in smoothing out |
156 |
* transient bursts of requests, it admits the possibility of |
157 |
* unbounded work queue growth when commands continue to arrive on |
158 |
* average faster than they can be processed. |
159 |
* |
160 |
* <li><em>Bounded queues.</em> A bounded queue (for example, an |
161 |
* {@link ArrayBlockingQueue}) helps prevent resource exhaustion when |
162 |
* used with finite maximumPoolSizes, but can be more difficult to |
163 |
* tune and control. Queue sizes and maximum pool sizes may be traded |
164 |
* off for each other: Using large queues and small pools minimizes |
165 |
* CPU usage, OS resources, and context-switching overhead, but can |
166 |
* lead to artificially low throughput. If tasks frequently block (for |
167 |
* example if they are I/O bound), a system may be able to schedule |
168 |
* time for more threads than you otherwise allow. Use of small queues |
169 |
* generally requires larger pool sizes, which keeps CPUs busier but |
170 |
* may encounter unacceptable scheduling overhead, which also |
171 |
* decreases throughput. |
172 |
* |
173 |
* </ol> |
174 |
* |
175 |
* </dd> |
176 |
* |
177 |
* <dt>Rejected tasks</dt> |
178 |
* |
179 |
* <dd>New tasks submitted in method {@link #execute(Runnable)} will be |
180 |
* <em>rejected</em> when the Executor has been shut down, and also when |
181 |
* the Executor uses finite bounds for both maximum threads and work queue |
182 |
* capacity, and is saturated. In either case, the {@code execute} method |
183 |
* invokes the {@link |
184 |
* RejectedExecutionHandler#rejectedExecution(Runnable, ThreadPoolExecutor)} |
185 |
* method of its {@link RejectedExecutionHandler}. Four predefined handler |
186 |
* policies are provided: |
187 |
* |
188 |
* <ol> |
189 |
* |
190 |
* <li>In the default {@link ThreadPoolExecutor.AbortPolicy}, the handler |
191 |
* throws a runtime {@link RejectedExecutionException} upon rejection. |
192 |
* |
193 |
* <li>In {@link ThreadPoolExecutor.CallerRunsPolicy}, the thread |
194 |
* that invokes {@code execute} itself runs the task. This provides a |
195 |
* simple feedback control mechanism that will slow down the rate that |
196 |
* new tasks are submitted. |
197 |
* |
198 |
* <li>In {@link ThreadPoolExecutor.DiscardPolicy}, a task that |
199 |
* cannot be executed is simply dropped. |
200 |
* |
201 |
* <li>In {@link ThreadPoolExecutor.DiscardOldestPolicy}, if the |
202 |
* executor is not shut down, the task at the head of the work queue |
203 |
* is dropped, and then execution is retried (which can fail again, |
204 |
* causing this to be repeated.) |
205 |
* |
206 |
* </ol> |
207 |
* |
208 |
* It is possible to define and use other kinds of {@link |
209 |
* RejectedExecutionHandler} classes. Doing so requires some care |
210 |
* especially when policies are designed to work only under particular |
211 |
* capacity or queuing policies. </dd> |
212 |
* |
213 |
* <dt>Hook methods</dt> |
214 |
* |
215 |
* <dd>This class provides {@code protected} overridable |
216 |
* {@link #beforeExecute(Thread, Runnable)} and |
217 |
* {@link #afterExecute(Runnable, Throwable)} methods that are called |
218 |
* before and after execution of each task. These can be used to |
219 |
* manipulate the execution environment; for example, reinitializing |
220 |
* ThreadLocals, gathering statistics, or adding log entries. |
221 |
* Additionally, method {@link #terminated} can be overridden to perform |
222 |
* any special processing that needs to be done once the Executor has |
223 |
* fully terminated. |
224 |
* |
225 |
* <p>If hook, callback, or BlockingQueue methods throw exceptions, |
226 |
* internal worker threads may in turn fail, abruptly terminate, and |
227 |
* possibly be replaced.</dd> |
228 |
* |
229 |
* <dt>Queue maintenance</dt> |
230 |
* |
231 |
* <dd>Method {@link #getQueue()} allows access to the work queue |
232 |
* for purposes of monitoring and debugging. Use of this method for |
233 |
* any other purpose is strongly discouraged. Two supplied methods, |
234 |
* {@link #remove(Runnable)} and {@link #purge} are available to |
235 |
* assist in storage reclamation when large numbers of queued tasks |
236 |
* become cancelled.</dd> |
237 |
* |
238 |
* <dt>Finalization</dt> |
239 |
* |
240 |
* <dd>A pool that is no longer referenced in a program <em>AND</em> |
241 |
* has no remaining threads will be {@code shutdown} automatically. If |
242 |
* you would like to ensure that unreferenced pools are reclaimed even |
243 |
* if users forget to call {@link #shutdown}, then you must arrange |
244 |
* that unused threads eventually die, by setting appropriate |
245 |
* keep-alive times, using a lower bound of zero core threads and/or |
246 |
* setting {@link #allowCoreThreadTimeOut(boolean)}. </dd> |
247 |
* |
248 |
* </dl> |
249 |
* |
250 |
* <p><b>Extension example</b>. Most extensions of this class |
251 |
* override one or more of the protected hook methods. For example, |
252 |
* here is a subclass that adds a simple pause/resume feature: |
253 |
* |
254 |
* <pre> {@code |
255 |
* class PausableThreadPoolExecutor extends ThreadPoolExecutor { |
256 |
* private boolean isPaused; |
257 |
* private ReentrantLock pauseLock = new ReentrantLock(); |
258 |
* private Condition unpaused = pauseLock.newCondition(); |
259 |
* |
260 |
* public PausableThreadPoolExecutor(...) { super(...); } |
261 |
* |
262 |
* protected void beforeExecute(Thread t, Runnable r) { |
263 |
* super.beforeExecute(t, r); |
264 |
* pauseLock.lock(); |
265 |
* try { |
266 |
* while (isPaused) unpaused.await(); |
267 |
* } catch (InterruptedException ie) { |
268 |
* t.interrupt(); |
269 |
* } finally { |
270 |
* pauseLock.unlock(); |
271 |
* } |
272 |
* } |
273 |
* |
274 |
* public void pause() { |
275 |
* pauseLock.lock(); |
276 |
* try { |
277 |
* isPaused = true; |
278 |
* } finally { |
279 |
* pauseLock.unlock(); |
280 |
* } |
281 |
* } |
282 |
* |
283 |
* public void resume() { |
284 |
* pauseLock.lock(); |
285 |
* try { |
286 |
* isPaused = false; |
287 |
* unpaused.signalAll(); |
288 |
* } finally { |
289 |
* pauseLock.unlock(); |
290 |
* } |
291 |
* } |
292 |
* }}</pre> |
293 |
* |
294 |
* @since 1.5 |
295 |
* @author Doug Lea |
296 |
*/ |
297 |
public class ThreadPoolExecutor extends AbstractExecutorService { |
298 |
/** |
299 |
* The main pool control state, ctl, is an atomic integer packing |
300 |
* two conceptual fields |
301 |
* workerCount, indicating the effective number of threads |
302 |
* runState, indicating whether running, shutting down etc |
303 |
* |
304 |
* In order to pack them into one int, we limit workerCount to |
305 |
* (2^29)-1 (about 500 million) threads rather than (2^31)-1 (2 |
306 |
* billion) otherwise representable. If this is ever an issue in |
307 |
* the future, the variable can be changed to be an AtomicLong, |
308 |
* and the shift/mask constants below adjusted. But until the need |
309 |
* arises, this code is a bit faster and simpler using an int. |
310 |
* |
311 |
* The workerCount is the number of workers that have been |
312 |
* permitted to start and not permitted to stop. The value may be |
313 |
* transiently different from the actual number of live threads, |
314 |
* for example when a ThreadFactory fails to create a thread when |
315 |
* asked, and when exiting threads are still performing |
316 |
* bookkeeping before terminating. The user-visible pool size is |
317 |
* reported as the current size of the workers set. |
318 |
* |
319 |
* The runState provides the main lifecycle control, taking on values: |
320 |
* |
321 |
* RUNNING: Accept new tasks and process queued tasks |
322 |
* SHUTDOWN: Don't accept new tasks, but process queued tasks |
323 |
* STOP: Don't accept new tasks, don't process queued tasks, |
324 |
* and interrupt in-progress tasks |
325 |
* TIDYING: All tasks have terminated, workerCount is zero, |
326 |
* the thread transitioning to state TIDYING |
327 |
* will run the terminated() hook method |
328 |
* TERMINATED: terminated() has completed |
329 |
* |
330 |
* The numerical order among these values matters, to allow |
331 |
* ordered comparisons. The runState monotonically increases over |
332 |
* time, but need not hit each state. The transitions are: |
333 |
* |
334 |
* RUNNING -> SHUTDOWN |
335 |
* On invocation of shutdown(), perhaps implicitly in finalize() |
336 |
* (RUNNING or SHUTDOWN) -> STOP |
337 |
* On invocation of shutdownNow() |
338 |
* SHUTDOWN -> TIDYING |
339 |
* When both queue and pool are empty |
340 |
* STOP -> TIDYING |
341 |
* When pool is empty |
342 |
* TIDYING -> TERMINATED |
343 |
* When the terminated() hook method has completed |
344 |
* |
345 |
* Threads waiting in awaitTermination() will return when the |
346 |
* state reaches TERMINATED. |
347 |
* |
348 |
* Detecting the transition from SHUTDOWN to TIDYING is less |
349 |
* straightforward than you'd like because the queue may become |
350 |
* empty after non-empty and vice versa during SHUTDOWN state, but |
351 |
* we can only terminate if, after seeing that it is empty, we see |
352 |
* that workerCount is 0 (which sometimes entails a recheck -- see |
353 |
* below). |
354 |
*/ |
355 |
private final AtomicInteger ctl = new AtomicInteger(ctlOf(RUNNING, 0)); |
356 |
private static final int COUNT_BITS = Integer.SIZE - 3; |
357 |
private static final int CAPACITY = (1 << COUNT_BITS) - 1; |
358 |
|
359 |
// runState is stored in the high-order bits |
360 |
private static final int RUNNING = -1 << COUNT_BITS; |
361 |
private static final int SHUTDOWN = 0 << COUNT_BITS; |
362 |
private static final int STOP = 1 << COUNT_BITS; |
363 |
private static final int TIDYING = 2 << COUNT_BITS; |
364 |
private static final int TERMINATED = 3 << COUNT_BITS; |
365 |
|
366 |
// Packing and unpacking ctl |
367 |
private static int runStateOf(int c) { return c & ~CAPACITY; } |
368 |
private static int workerCountOf(int c) { return c & CAPACITY; } |
369 |
private static int ctlOf(int rs, int wc) { return rs | wc; } |
370 |
|
371 |
/* |
372 |
* Bit field accessors that don't require unpacking ctl. |
373 |
* These depend on the bit layout and on workerCount being never negative. |
374 |
*/ |
375 |
|
376 |
private static boolean runStateLessThan(int c, int s) { |
377 |
return c < s; |
378 |
} |
379 |
|
380 |
private static boolean runStateAtLeast(int c, int s) { |
381 |
return c >= s; |
382 |
} |
383 |
|
384 |
private static boolean isRunning(int c) { |
385 |
return c < SHUTDOWN; |
386 |
} |
387 |
|
388 |
/** |
389 |
* Attempts to CAS-increment the workerCount field of ctl. |
390 |
*/ |
391 |
private boolean compareAndIncrementWorkerCount(int expect) { |
392 |
return ctl.compareAndSet(expect, expect + 1); |
393 |
} |
394 |
|
395 |
/** |
396 |
* Attempts to CAS-decrement the workerCount field of ctl. |
397 |
*/ |
398 |
private boolean compareAndDecrementWorkerCount(int expect) { |
399 |
return ctl.compareAndSet(expect, expect - 1); |
400 |
} |
401 |
|
402 |
/** |
403 |
* Decrements the workerCount field of ctl. This is called only on |
404 |
* abrupt termination of a thread (see processWorkerExit). Other |
405 |
* decrements are performed within getTask. |
406 |
*/ |
407 |
private void decrementWorkerCount() { |
408 |
do {} while (! compareAndDecrementWorkerCount(ctl.get())); |
409 |
} |
410 |
|
411 |
/** |
412 |
* The queue used for holding tasks and handing off to worker |
413 |
* threads. We do not require that workQueue.poll() returning |
414 |
* null necessarily means that workQueue.isEmpty(), so rely |
415 |
* solely on isEmpty to see if the queue is empty (which we must |
416 |
* do for example when deciding whether to transition from |
417 |
* SHUTDOWN to TIDYING). This accommodates special-purpose |
418 |
* queues such as DelayQueues for which poll() is allowed to |
419 |
* return null even if it may later return non-null when delays |
420 |
* expire. |
421 |
*/ |
422 |
private final BlockingQueue<Runnable> workQueue; |
423 |
|
424 |
/** |
425 |
* Lock held on access to workers set and related bookkeeping. |
426 |
* While we could use a concurrent set of some sort, it turns out |
427 |
* to be generally preferable to use a lock. Among the reasons is |
428 |
* that this serializes interruptIdleWorkers, which avoids |
429 |
* unnecessary interrupt storms, especially during shutdown. |
430 |
* Otherwise exiting threads would concurrently interrupt those |
431 |
* that have not yet interrupted. It also simplifies some of the |
432 |
* associated statistics bookkeeping of largestPoolSize etc. We |
433 |
* also hold mainLock on shutdown and shutdownNow, for the sake of |
434 |
* ensuring workers set is stable while separately checking |
435 |
* permission to interrupt and actually interrupting. |
436 |
*/ |
437 |
private final ReentrantLock mainLock = new ReentrantLock(); |
438 |
|
439 |
/** |
440 |
* Set containing all worker threads in pool. Accessed only when |
441 |
* holding mainLock. |
442 |
*/ |
443 |
private final HashSet<Worker> workers = new HashSet<>(); |
444 |
|
445 |
/** |
446 |
* Wait condition to support awaitTermination. |
447 |
*/ |
448 |
private final Condition termination = mainLock.newCondition(); |
449 |
|
450 |
/** |
451 |
* Tracks largest attained pool size. Accessed only under |
452 |
* mainLock. |
453 |
*/ |
454 |
private int largestPoolSize; |
455 |
|
456 |
/** |
457 |
* Counter for completed tasks. Updated only on termination of |
458 |
* worker threads. Accessed only under mainLock. |
459 |
*/ |
460 |
private long completedTaskCount; |
461 |
|
462 |
/* |
463 |
* All user control parameters are declared as volatiles so that |
464 |
* ongoing actions are based on freshest values, but without need |
465 |
* for locking, since no internal invariants depend on them |
466 |
* changing synchronously with respect to other actions. |
467 |
*/ |
468 |
|
469 |
/** |
470 |
* Factory for new threads. All threads are created using this |
471 |
* factory (via method addWorker). All callers must be prepared |
472 |
* for addWorker to fail, which may reflect a system or user's |
473 |
* policy limiting the number of threads. Even though it is not |
474 |
* treated as an error, failure to create threads may result in |
475 |
* new tasks being rejected or existing ones remaining stuck in |
476 |
* the queue. |
477 |
* |
478 |
* We go further and preserve pool invariants even in the face of |
479 |
* errors such as OutOfMemoryError, that might be thrown while |
480 |
* trying to create threads. Such errors are rather common due to |
481 |
* the need to allocate a native stack in Thread.start, and users |
482 |
* will want to perform clean pool shutdown to clean up. There |
483 |
* will likely be enough memory available for the cleanup code to |
484 |
* complete without encountering yet another OutOfMemoryError. |
485 |
*/ |
486 |
private volatile ThreadFactory threadFactory; |
487 |
|
488 |
/** |
489 |
* Handler called when saturated or shutdown in execute. |
490 |
*/ |
491 |
private volatile RejectedExecutionHandler handler; |
492 |
|
493 |
/** |
494 |
* Timeout in nanoseconds for idle threads waiting for work. |
495 |
* Threads use this timeout when there are more than corePoolSize |
496 |
* present or if allowCoreThreadTimeOut. Otherwise they wait |
497 |
* forever for new work. |
498 |
*/ |
499 |
private volatile long keepAliveTime; |
500 |
|
501 |
/** |
502 |
* If false (default), core threads stay alive even when idle. |
503 |
* If true, core threads use keepAliveTime to time out waiting |
504 |
* for work. |
505 |
*/ |
506 |
private volatile boolean allowCoreThreadTimeOut; |
507 |
|
508 |
/** |
509 |
* Core pool size is the minimum number of workers to keep alive |
510 |
* (and not allow to time out etc) unless allowCoreThreadTimeOut |
511 |
* is set, in which case the minimum is zero. |
512 |
*/ |
513 |
private volatile int corePoolSize; |
514 |
|
515 |
/** |
516 |
* Maximum pool size. Note that the actual maximum is internally |
517 |
* bounded by CAPACITY. |
518 |
*/ |
519 |
private volatile int maximumPoolSize; |
520 |
|
521 |
/** |
522 |
* The default rejected execution handler. |
523 |
*/ |
524 |
private static final RejectedExecutionHandler defaultHandler = |
525 |
new AbortPolicy(); |
526 |
|
527 |
/** |
528 |
* Permission required for callers of shutdown and shutdownNow. |
529 |
* We additionally require (see checkShutdownAccess) that callers |
530 |
* have permission to actually interrupt threads in the worker set |
531 |
* (as governed by Thread.interrupt, which relies on |
532 |
* ThreadGroup.checkAccess, which in turn relies on |
533 |
* SecurityManager.checkAccess). Shutdowns are attempted only if |
534 |
* these checks pass. |
535 |
* |
536 |
* All actual invocations of Thread.interrupt (see |
537 |
* interruptIdleWorkers and interruptWorkers) ignore |
538 |
* SecurityExceptions, meaning that the attempted interrupts |
539 |
* silently fail. In the case of shutdown, they should not fail |
540 |
* unless the SecurityManager has inconsistent policies, sometimes |
541 |
* allowing access to a thread and sometimes not. In such cases, |
542 |
* failure to actually interrupt threads may disable or delay full |
543 |
* termination. Other uses of interruptIdleWorkers are advisory, |
544 |
* and failure to actually interrupt will merely delay response to |
545 |
* configuration changes so is not handled exceptionally. |
546 |
*/ |
547 |
private static final RuntimePermission shutdownPerm = |
548 |
new RuntimePermission("modifyThread"); |
549 |
|
550 |
/** The context to be used when executing the finalizer, or null. */ |
551 |
private final AccessControlContext acc; |
552 |
|
553 |
/** |
554 |
* Class Worker mainly maintains interrupt control state for |
555 |
* threads running tasks, along with other minor bookkeeping. |
556 |
* This class opportunistically extends AbstractQueuedSynchronizer |
557 |
* to simplify acquiring and releasing a lock surrounding each |
558 |
* task execution. This protects against interrupts that are |
559 |
* intended to wake up a worker thread waiting for a task from |
560 |
* instead interrupting a task being run. We implement a simple |
561 |
* non-reentrant mutual exclusion lock rather than use |
562 |
* ReentrantLock because we do not want worker tasks to be able to |
563 |
* reacquire the lock when they invoke pool control methods like |
564 |
* setCorePoolSize. Additionally, to suppress interrupts until |
565 |
* the thread actually starts running tasks, we initialize lock |
566 |
* state to a negative value, and clear it upon start (in |
567 |
* runWorker). |
568 |
*/ |
569 |
private final class Worker |
570 |
extends AbstractQueuedSynchronizer |
571 |
implements Runnable |
572 |
{ |
573 |
/** |
574 |
* This class will never be serialized, but we provide a |
575 |
* serialVersionUID to suppress a javac warning. |
576 |
*/ |
577 |
private static final long serialVersionUID = 6138294804551838833L; |
578 |
|
579 |
/** Thread this worker is running in. Null if factory fails. */ |
580 |
final Thread thread; |
581 |
/** Initial task to run. Possibly null. */ |
582 |
Runnable firstTask; |
583 |
/** Per-thread task counter */ |
584 |
volatile long completedTasks; |
585 |
|
586 |
// TODO: switch to AbstractQueuedLongSynchronizer and move |
587 |
// completedTasks into the lock word. |
588 |
|
589 |
/** |
590 |
* Creates with given first task and thread from ThreadFactory. |
591 |
* @param firstTask the first task (null if none) |
592 |
*/ |
593 |
Worker(Runnable firstTask) { |
594 |
setState(-1); // inhibit interrupts until runWorker |
595 |
this.firstTask = firstTask; |
596 |
this.thread = getThreadFactory().newThread(this); |
597 |
} |
598 |
|
599 |
/** Delegates main run loop to outer runWorker. */ |
600 |
public void run() { |
601 |
runWorker(this); |
602 |
} |
603 |
|
604 |
// Lock methods |
605 |
// |
606 |
// The value 0 represents the unlocked state. |
607 |
// The value 1 represents the locked state. |
608 |
|
609 |
protected boolean isHeldExclusively() { |
610 |
return getState() != 0; |
611 |
} |
612 |
|
613 |
protected boolean tryAcquire(int unused) { |
614 |
if (compareAndSetState(0, 1)) { |
615 |
setExclusiveOwnerThread(Thread.currentThread()); |
616 |
return true; |
617 |
} |
618 |
return false; |
619 |
} |
620 |
|
621 |
protected boolean tryRelease(int unused) { |
622 |
setExclusiveOwnerThread(null); |
623 |
setState(0); |
624 |
return true; |
625 |
} |
626 |
|
627 |
public void lock() { acquire(1); } |
628 |
public boolean tryLock() { return tryAcquire(1); } |
629 |
public void unlock() { release(1); } |
630 |
public boolean isLocked() { return isHeldExclusively(); } |
631 |
|
632 |
void interruptIfStarted() { |
633 |
Thread t; |
634 |
if (getState() >= 0 && (t = thread) != null && !t.isInterrupted()) { |
635 |
try { |
636 |
t.interrupt(); |
637 |
} catch (SecurityException ignore) { |
638 |
} |
639 |
} |
640 |
} |
641 |
} |
642 |
|
643 |
/* |
644 |
* Methods for setting control state |
645 |
*/ |
646 |
|
647 |
/** |
648 |
* Transitions runState to given target, or leaves it alone if |
649 |
* already at least the given target. |
650 |
* |
651 |
* @param targetState the desired state, either SHUTDOWN or STOP |
652 |
* (but not TIDYING or TERMINATED -- use tryTerminate for that) |
653 |
*/ |
654 |
private void advanceRunState(int targetState) { |
655 |
// assert targetState == SHUTDOWN || targetState == STOP; |
656 |
for (;;) { |
657 |
int c = ctl.get(); |
658 |
if (runStateAtLeast(c, targetState) || |
659 |
ctl.compareAndSet(c, ctlOf(targetState, workerCountOf(c)))) |
660 |
break; |
661 |
} |
662 |
} |
663 |
|
664 |
/** |
665 |
* Transitions to TERMINATED state if either (SHUTDOWN and pool |
666 |
* and queue empty) or (STOP and pool empty). If otherwise |
667 |
* eligible to terminate but workerCount is nonzero, interrupts an |
668 |
* idle worker to ensure that shutdown signals propagate. This |
669 |
* method must be called following any action that might make |
670 |
* termination possible -- reducing worker count or removing tasks |
671 |
* from the queue during shutdown. The method is non-private to |
672 |
* allow access from ScheduledThreadPoolExecutor. |
673 |
*/ |
674 |
final void tryTerminate() { |
675 |
for (;;) { |
676 |
int c = ctl.get(); |
677 |
if (isRunning(c) || |
678 |
runStateAtLeast(c, TIDYING) || |
679 |
(runStateOf(c) == SHUTDOWN && ! workQueue.isEmpty())) |
680 |
return; |
681 |
if (workerCountOf(c) != 0) { // Eligible to terminate |
682 |
interruptIdleWorkers(ONLY_ONE); |
683 |
return; |
684 |
} |
685 |
|
686 |
final ReentrantLock mainLock = this.mainLock; |
687 |
mainLock.lock(); |
688 |
try { |
689 |
if (ctl.compareAndSet(c, ctlOf(TIDYING, 0))) { |
690 |
try { |
691 |
terminated(); |
692 |
} finally { |
693 |
ctl.set(ctlOf(TERMINATED, 0)); |
694 |
termination.signalAll(); |
695 |
} |
696 |
return; |
697 |
} |
698 |
} finally { |
699 |
mainLock.unlock(); |
700 |
} |
701 |
// else retry on failed CAS |
702 |
} |
703 |
} |
704 |
|
705 |
/* |
706 |
* Methods for controlling interrupts to worker threads. |
707 |
*/ |
708 |
|
709 |
/** |
710 |
* If there is a security manager, makes sure caller has |
711 |
* permission to shut down threads in general (see shutdownPerm). |
712 |
* If this passes, additionally makes sure the caller is allowed |
713 |
* to interrupt each worker thread. This might not be true even if |
714 |
* first check passed, if the SecurityManager treats some threads |
715 |
* specially. |
716 |
*/ |
717 |
private void checkShutdownAccess() { |
718 |
SecurityManager security = System.getSecurityManager(); |
719 |
if (security != null) { |
720 |
security.checkPermission(shutdownPerm); |
721 |
final ReentrantLock mainLock = this.mainLock; |
722 |
mainLock.lock(); |
723 |
try { |
724 |
for (Worker w : workers) |
725 |
security.checkAccess(w.thread); |
726 |
} finally { |
727 |
mainLock.unlock(); |
728 |
} |
729 |
} |
730 |
} |
731 |
|
732 |
/** |
733 |
* Interrupts all threads, even if active. Ignores SecurityExceptions |
734 |
* (in which case some threads may remain uninterrupted). |
735 |
*/ |
736 |
private void interruptWorkers() { |
737 |
final ReentrantLock mainLock = this.mainLock; |
738 |
mainLock.lock(); |
739 |
try { |
740 |
for (Worker w : workers) |
741 |
w.interruptIfStarted(); |
742 |
} finally { |
743 |
mainLock.unlock(); |
744 |
} |
745 |
} |
746 |
|
747 |
/** |
748 |
* Interrupts threads that might be waiting for tasks (as |
749 |
* indicated by not being locked) so they can check for |
750 |
* termination or configuration changes. Ignores |
751 |
* SecurityExceptions (in which case some threads may remain |
752 |
* uninterrupted). |
753 |
* |
754 |
* @param onlyOne If true, interrupt at most one worker. This is |
755 |
* called only from tryTerminate when termination is otherwise |
756 |
* enabled but there are still other workers. In this case, at |
757 |
* most one waiting worker is interrupted to propagate shutdown |
758 |
* signals in case all threads are currently waiting. |
759 |
* Interrupting any arbitrary thread ensures that newly arriving |
760 |
* workers since shutdown began will also eventually exit. |
761 |
* To guarantee eventual termination, it suffices to always |
762 |
* interrupt only one idle worker, but shutdown() interrupts all |
763 |
* idle workers so that redundant workers exit promptly, not |
764 |
* waiting for a straggler task to finish. |
765 |
*/ |
766 |
private void interruptIdleWorkers(boolean onlyOne) { |
767 |
final ReentrantLock mainLock = this.mainLock; |
768 |
mainLock.lock(); |
769 |
try { |
770 |
for (Worker w : workers) { |
771 |
Thread t = w.thread; |
772 |
if (!t.isInterrupted() && w.tryLock()) { |
773 |
try { |
774 |
t.interrupt(); |
775 |
} catch (SecurityException ignore) { |
776 |
} finally { |
777 |
w.unlock(); |
778 |
} |
779 |
} |
780 |
if (onlyOne) |
781 |
break; |
782 |
} |
783 |
} finally { |
784 |
mainLock.unlock(); |
785 |
} |
786 |
} |
787 |
|
788 |
    /**
     * Common form of interruptIdleWorkers, to avoid having to
     * remember what the boolean argument means.
     */
    private void interruptIdleWorkers() {
        interruptIdleWorkers(false);
    }

    /** Argument to interruptIdleWorkers: interrupt at most one idle worker. */
    private static final boolean ONLY_ONE = true;
797 |
|
798 |
/* |
799 |
* Misc utilities, most of which are also exported to |
800 |
* ScheduledThreadPoolExecutor |
801 |
*/ |
802 |
|
803 |
    /**
     * Invokes the rejected execution handler for the given command.
     * Package-protected for use by ScheduledThreadPoolExecutor.
     */
    final void reject(Runnable command) {
        // Policy is delegated entirely to the configured handler; it may
        // throw RejectedExecutionException at its discretion.
        handler.rejectedExecution(command, this);
    }
810 |
|
811 |
    /**
     * Performs any further cleanup following run state transition on
     * invocation of shutdown. A no-op here, but used by
     * ScheduledThreadPoolExecutor to cancel delayed tasks.
     */
    void onShutdown() {
        // Intentionally empty: subclass hook, called while holding mainLock.
    }
818 |
|
819 |
/** |
820 |
* Drains the task queue into a new list, normally using |
821 |
* drainTo. But if the queue is a DelayQueue or any other kind of |
822 |
* queue for which poll or drainTo may fail to remove some |
823 |
* elements, it deletes them one by one. |
824 |
*/ |
825 |
private List<Runnable> drainQueue() { |
826 |
BlockingQueue<Runnable> q = workQueue; |
827 |
ArrayList<Runnable> taskList = new ArrayList<>(); |
828 |
q.drainTo(taskList); |
829 |
if (!q.isEmpty()) { |
830 |
for (Runnable r : q.toArray(new Runnable[0])) { |
831 |
if (q.remove(r)) |
832 |
taskList.add(r); |
833 |
} |
834 |
} |
835 |
return taskList; |
836 |
} |
837 |
|
838 |
/* |
839 |
* Methods for creating, running and cleaning up after workers |
840 |
*/ |
841 |
|
842 |
    /**
     * Checks if a new worker can be added with respect to current
     * pool state and the given bound (either core or maximum). If so,
     * the worker count is adjusted accordingly, and, if possible, a
     * new worker is created and started, running firstTask as its
     * first task. This method returns false if the pool is stopped or
     * eligible to shut down. It also returns false if the thread
     * factory fails to create a thread when asked. If the thread
     * creation fails, either due to the thread factory returning
     * null, or due to an exception (typically OutOfMemoryError in
     * Thread.start()), we roll back cleanly.
     *
     * @param firstTask the task the new thread should run first (or
     * null if none). Workers are created with an initial first task
     * (in method execute()) to bypass queuing when there are fewer
     * than corePoolSize threads (in which case we always start one),
     * or when the queue is full (in which case we must bypass queue).
     * Initially idle threads are usually created via
     * prestartCoreThread or to replace other dying workers.
     *
     * @param core if true use corePoolSize as bound, else
     * maximumPoolSize. (A boolean indicator is used here rather than a
     * value to ensure reads of fresh values after checking other pool
     * state).
     * @return true if successful
     */
    private boolean addWorker(Runnable firstTask, boolean core) {
        retry:
        for (;;) {
            int c = ctl.get();
            int rs = runStateOf(c);

            // Check if queue empty only if necessary.
            // Refuse unless this is a SHUTDOWN-state replacement worker
            // (no first task) needed to finish draining a non-empty queue.
            if (rs >= SHUTDOWN &&
                ! (rs == SHUTDOWN &&
                   firstTask == null &&
                   ! workQueue.isEmpty()))
                return false;

            for (;;) {
                int wc = workerCountOf(c);
                if (wc >= CAPACITY ||
                    wc >= (core ? corePoolSize : maximumPoolSize))
                    return false;
                // Claim a worker slot atomically; the thread itself is
                // created only after this CAS succeeds.
                if (compareAndIncrementWorkerCount(c))
                    break retry;
                c = ctl.get();  // Re-read ctl
                if (runStateOf(c) != rs)
                    continue retry;
                // else CAS failed due to workerCount change; retry inner loop
            }
        }

        boolean workerStarted = false;
        boolean workerAdded = false;
        Worker w = null;
        try {
            w = new Worker(firstTask);
            final Thread t = w.thread;
            if (t != null) { // null if the thread factory failed
                final ReentrantLock mainLock = this.mainLock;
                mainLock.lock();
                try {
                    // Recheck while holding lock.
                    // Back out on ThreadFactory failure or if
                    // shut down before lock acquired.
                    int rs = runStateOf(ctl.get());

                    if (rs < SHUTDOWN ||
                        (rs == SHUTDOWN && firstTask == null)) {
                        if (t.isAlive()) // precheck that t is startable
                            throw new IllegalThreadStateException();
                        workers.add(w);
                        int s = workers.size();
                        if (s > largestPoolSize)
                            largestPoolSize = s;
                        workerAdded = true;
                    }
                } finally {
                    mainLock.unlock();
                }
                // Start outside the lock; Thread.start may be slow or throw.
                if (workerAdded) {
                    t.start();
                    workerStarted = true;
                }
            }
        } finally {
            // Roll back the claimed slot (and set membership) on any failure.
            if (! workerStarted)
                addWorkerFailed(w);
        }
        return workerStarted;
    }
934 |
|
935 |
/** |
936 |
* Rolls back the worker thread creation. |
937 |
* - removes worker from workers, if present |
938 |
* - decrements worker count |
939 |
* - rechecks for termination, in case the existence of this |
940 |
* worker was holding up termination |
941 |
*/ |
942 |
private void addWorkerFailed(Worker w) { |
943 |
final ReentrantLock mainLock = this.mainLock; |
944 |
mainLock.lock(); |
945 |
try { |
946 |
if (w != null) |
947 |
workers.remove(w); |
948 |
decrementWorkerCount(); |
949 |
tryTerminate(); |
950 |
} finally { |
951 |
mainLock.unlock(); |
952 |
} |
953 |
} |
954 |
|
955 |
    /**
     * Performs cleanup and bookkeeping for a dying worker. Called
     * only from worker threads. Unless completedAbruptly is set,
     * assumes that workerCount has already been adjusted to account
     * for exit. This method removes thread from worker set, and
     * possibly terminates the pool or replaces the worker if either
     * it exited due to user task exception or if fewer than
     * corePoolSize workers are running or queue is non-empty but
     * there are no workers.
     *
     * @param w the worker
     * @param completedAbruptly if the worker died due to user exception
     */
    private void processWorkerExit(Worker w, boolean completedAbruptly) {
        if (completedAbruptly) // If abrupt, then workerCount wasn't adjusted
            decrementWorkerCount();

        final ReentrantLock mainLock = this.mainLock;
        mainLock.lock();
        try {
            // Fold this worker's statistics into the pool total before
            // forgetting the worker.
            completedTaskCount += w.completedTasks;
            workers.remove(w);
        } finally {
            mainLock.unlock();
        }

        // This exit may have been the last obstacle to termination.
        tryTerminate();

        int c = ctl.get();
        if (runStateLessThan(c, STOP)) {
            if (!completedAbruptly) {
                // Minimum workers required: core size, or zero if core
                // threads may time out -- but never zero while tasks
                // remain queued.
                int min = allowCoreThreadTimeOut ? 0 : corePoolSize;
                if (min == 0 && ! workQueue.isEmpty())
                    min = 1;
                if (workerCountOf(c) >= min)
                    return; // replacement not needed
            }
            // Replace this worker: it died from a user exception, or the
            // pool dropped below its required minimum.
            addWorker(null, false);
        }
    }
995 |
|
996 |
    /**
     * Performs blocking or timed wait for a task, depending on
     * current configuration settings, or returns null if this worker
     * must exit because of any of:
     * 1. There are more than maximumPoolSize workers (due to
     *    a call to setMaximumPoolSize).
     * 2. The pool is stopped.
     * 3. The pool is shutdown and the queue is empty.
     * 4. This worker timed out waiting for a task, and timed-out
     *    workers are subject to termination (that is,
     *    {@code allowCoreThreadTimeOut || workerCount > corePoolSize})
     *    both before and after the timed wait, and if the queue is
     *    non-empty, this worker is not the last thread in the pool.
     *
     * @return task, or null if the worker must exit, in which case
     *         workerCount is decremented
     */
    private Runnable getTask() {
        boolean timedOut = false; // Did the last poll() time out?

        for (;;) {
            int c = ctl.get();
            int rs = runStateOf(c);

            // Check if queue empty only if necessary.
            if (rs >= SHUTDOWN && (rs >= STOP || workQueue.isEmpty())) {
                decrementWorkerCount();
                return null;
            }

            int wc = workerCountOf(c);

            // Are workers subject to culling?
            boolean timed = allowCoreThreadTimeOut || wc > corePoolSize;

            if ((wc > maximumPoolSize || (timed && timedOut))
                && (wc > 1 || workQueue.isEmpty())) {
                // CAS guards against racing exits: on failure, loop to
                // re-read state rather than over-decrementing.
                if (compareAndDecrementWorkerCount(c))
                    return null;
                continue;
            }

            try {
                Runnable r = timed ?
                    workQueue.poll(keepAliveTime, TimeUnit.NANOSECONDS) :
                    workQueue.take();
                if (r != null)
                    return r;
                timedOut = true;
            } catch (InterruptedException retry) {
                // Interrupt may signal shutdown or a configuration change;
                // loop to re-read pool state rather than treating it as a
                // timeout.
                timedOut = false;
            }
        }
    }
1050 |
|
1051 |
    /**
     * Main worker run loop. Repeatedly gets tasks from queue and
     * executes them, while coping with a number of issues:
     *
     * 1. We may start out with an initial task, in which case we
     * don't need to get the first one. Otherwise, as long as pool is
     * running, we get tasks from getTask. If it returns null then the
     * worker exits due to changed pool state or configuration
     * parameters. Other exits result from exception throws in
     * external code, in which case completedAbruptly holds, which
     * usually leads processWorkerExit to replace this thread.
     *
     * 2. Before running any task, the lock is acquired to prevent
     * other pool interrupts while the task is executing, and then we
     * ensure that unless pool is stopping, this thread does not have
     * its interrupt set.
     *
     * 3. Each task run is preceded by a call to beforeExecute, which
     * might throw an exception, in which case we cause thread to die
     * (breaking loop with completedAbruptly true) without processing
     * the task.
     *
     * 4. Assuming beforeExecute completes normally, we run the task,
     * gathering any of its thrown exceptions to send to afterExecute.
     * We separately handle RuntimeException, Error (both of which the
     * specs guarantee that we trap) and arbitrary Throwables.
     * Because we cannot rethrow Throwables within Runnable.run, we
     * wrap them within Errors on the way out (to the thread's
     * UncaughtExceptionHandler). Any thrown exception also
     * conservatively causes thread to die.
     *
     * 5. After task.run completes, we call afterExecute, which may
     * also throw an exception, which will also cause thread to
     * die. According to JLS Sec 14.20, this exception is the one that
     * will be in effect even if task.run throws.
     *
     * The net effect of the exception mechanics is that afterExecute
     * and the thread's UncaughtExceptionHandler have as accurate
     * information as we can provide about any problems encountered by
     * user code.
     *
     * @param w the worker
     */
    final void runWorker(Worker w) {
        Thread wt = Thread.currentThread();
        Runnable task = w.firstTask;
        w.firstTask = null;
        w.unlock(); // allow interrupts
        // Assume abrupt death until the loop exits normally; the flag
        // tells processWorkerExit whether workerCount was adjusted.
        boolean completedAbruptly = true;
        try {
            while (task != null || (task = getTask()) != null) {
                w.lock();
                // If pool is stopping, ensure thread is interrupted;
                // if not, ensure thread is not interrupted. This
                // requires a recheck in second case to deal with
                // shutdownNow race while clearing interrupt
                if ((runStateAtLeast(ctl.get(), STOP) ||
                     (Thread.interrupted() &&
                      runStateAtLeast(ctl.get(), STOP))) &&
                    !wt.isInterrupted())
                    wt.interrupt();
                try {
                    beforeExecute(wt, task);
                    Throwable thrown = null;
                    try {
                        task.run();
                    } catch (RuntimeException x) {
                        thrown = x; throw x;
                    } catch (Error x) {
                        thrown = x; throw x;
                    } catch (Throwable x) {
                        // Cannot rethrow a checked Throwable from run();
                        // wrap so the UncaughtExceptionHandler sees it.
                        thrown = x; throw new Error(x);
                    } finally {
                        afterExecute(task, thrown);
                    }
                } finally {
                    task = null;
                    w.completedTasks++;
                    w.unlock();
                }
            }
            completedAbruptly = false;
        } finally {
            processWorkerExit(w, completedAbruptly);
        }
    }
1137 |
|
1138 |
// Public constructors and methods |
1139 |
|
1140 |
    /**
     * Creates a new {@code ThreadPoolExecutor} with the given initial
     * parameters, the default thread factory and the default rejected
     * execution handler.
     *
     * <p>It may be more convenient to use one of the {@link Executors}
     * factory methods instead of this general purpose constructor.
     *
     * @param corePoolSize the number of threads to keep in the pool, even
     *        if they are idle, unless {@code allowCoreThreadTimeOut} is set
     * @param maximumPoolSize the maximum number of threads to allow in the
     *        pool
     * @param keepAliveTime when the number of threads is greater than
     *        the core, this is the maximum time that excess idle threads
     *        will wait for new tasks before terminating.
     * @param unit the time unit for the {@code keepAliveTime} argument
     * @param workQueue the queue to use for holding tasks before they are
     *        executed. This queue will hold only the {@code Runnable}
     *        tasks submitted by the {@code execute} method.
     * @throws IllegalArgumentException if one of the following holds:<br>
     *         {@code corePoolSize < 0}<br>
     *         {@code keepAliveTime < 0}<br>
     *         {@code maximumPoolSize <= 0}<br>
     *         {@code maximumPoolSize < corePoolSize}
     * @throws NullPointerException if {@code workQueue} is null
     */
    public ThreadPoolExecutor(int corePoolSize,
                              int maximumPoolSize,
                              long keepAliveTime,
                              TimeUnit unit,
                              BlockingQueue<Runnable> workQueue) {
        // Delegate to the general constructor, filling in the default
        // thread factory and default (abort) rejection handler.
        this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue,
             Executors.defaultThreadFactory(), defaultHandler);
    }
1174 |
|
1175 |
    /**
     * Creates a new {@code ThreadPoolExecutor} with the given initial
     * parameters and {@linkplain ThreadPoolExecutor.AbortPolicy
     * default rejected execution handler}.
     *
     * @param corePoolSize the number of threads to keep in the pool, even
     *        if they are idle, unless {@code allowCoreThreadTimeOut} is set
     * @param maximumPoolSize the maximum number of threads to allow in the
     *        pool
     * @param keepAliveTime when the number of threads is greater than
     *        the core, this is the maximum time that excess idle threads
     *        will wait for new tasks before terminating.
     * @param unit the time unit for the {@code keepAliveTime} argument
     * @param workQueue the queue to use for holding tasks before they are
     *        executed. This queue will hold only the {@code Runnable}
     *        tasks submitted by the {@code execute} method.
     * @param threadFactory the factory to use when the executor
     *        creates a new thread
     * @throws IllegalArgumentException if one of the following holds:<br>
     *         {@code corePoolSize < 0}<br>
     *         {@code keepAliveTime < 0}<br>
     *         {@code maximumPoolSize <= 0}<br>
     *         {@code maximumPoolSize < corePoolSize}
     * @throws NullPointerException if {@code workQueue}
     *         or {@code threadFactory} is null
     */
    public ThreadPoolExecutor(int corePoolSize,
                              int maximumPoolSize,
                              long keepAliveTime,
                              TimeUnit unit,
                              BlockingQueue<Runnable> workQueue,
                              ThreadFactory threadFactory) {
        // Delegate to the general constructor, filling in only the
        // default (abort) rejection handler.
        this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue,
             threadFactory, defaultHandler);
    }
1210 |
|
1211 |
    /**
     * Creates a new {@code ThreadPoolExecutor} with the given initial
     * parameters and
     * {@linkplain Executors#defaultThreadFactory default thread factory}.
     *
     * @param corePoolSize the number of threads to keep in the pool, even
     *        if they are idle, unless {@code allowCoreThreadTimeOut} is set
     * @param maximumPoolSize the maximum number of threads to allow in the
     *        pool
     * @param keepAliveTime when the number of threads is greater than
     *        the core, this is the maximum time that excess idle threads
     *        will wait for new tasks before terminating.
     * @param unit the time unit for the {@code keepAliveTime} argument
     * @param workQueue the queue to use for holding tasks before they are
     *        executed. This queue will hold only the {@code Runnable}
     *        tasks submitted by the {@code execute} method.
     * @param handler the handler to use when execution is blocked
     *        because the thread bounds and queue capacities are reached
     * @throws IllegalArgumentException if one of the following holds:<br>
     *         {@code corePoolSize < 0}<br>
     *         {@code keepAliveTime < 0}<br>
     *         {@code maximumPoolSize <= 0}<br>
     *         {@code maximumPoolSize < corePoolSize}
     * @throws NullPointerException if {@code workQueue}
     *         or {@code handler} is null
     */
    public ThreadPoolExecutor(int corePoolSize,
                              int maximumPoolSize,
                              long keepAliveTime,
                              TimeUnit unit,
                              BlockingQueue<Runnable> workQueue,
                              RejectedExecutionHandler handler) {
        // Delegate to the general constructor, filling in only the
        // default thread factory.
        this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue,
             Executors.defaultThreadFactory(), handler);
    }
1246 |
|
1247 |
/** |
1248 |
* Creates a new {@code ThreadPoolExecutor} with the given initial |
1249 |
* parameters. |
1250 |
* |
1251 |
* @param corePoolSize the number of threads to keep in the pool, even |
1252 |
* if they are idle, unless {@code allowCoreThreadTimeOut} is set |
1253 |
* @param maximumPoolSize the maximum number of threads to allow in the |
1254 |
* pool |
1255 |
* @param keepAliveTime when the number of threads is greater than |
1256 |
* the core, this is the maximum time that excess idle threads |
1257 |
* will wait for new tasks before terminating. |
1258 |
* @param unit the time unit for the {@code keepAliveTime} argument |
1259 |
* @param workQueue the queue to use for holding tasks before they are |
1260 |
* executed. This queue will hold only the {@code Runnable} |
1261 |
* tasks submitted by the {@code execute} method. |
1262 |
* @param threadFactory the factory to use when the executor |
1263 |
* creates a new thread |
1264 |
* @param handler the handler to use when execution is blocked |
1265 |
* because the thread bounds and queue capacities are reached |
1266 |
* @throws IllegalArgumentException if one of the following holds:<br> |
1267 |
* {@code corePoolSize < 0}<br> |
1268 |
* {@code keepAliveTime < 0}<br> |
1269 |
* {@code maximumPoolSize <= 0}<br> |
1270 |
* {@code maximumPoolSize < corePoolSize} |
1271 |
* @throws NullPointerException if {@code workQueue} |
1272 |
* or {@code threadFactory} or {@code handler} is null |
1273 |
*/ |
1274 |
public ThreadPoolExecutor(int corePoolSize, |
1275 |
int maximumPoolSize, |
1276 |
long keepAliveTime, |
1277 |
TimeUnit unit, |
1278 |
BlockingQueue<Runnable> workQueue, |
1279 |
ThreadFactory threadFactory, |
1280 |
RejectedExecutionHandler handler) { |
1281 |
if (corePoolSize < 0 || |
1282 |
maximumPoolSize <= 0 || |
1283 |
maximumPoolSize < corePoolSize || |
1284 |
keepAliveTime < 0) |
1285 |
throw new IllegalArgumentException(); |
1286 |
if (workQueue == null || threadFactory == null || handler == null) |
1287 |
throw new NullPointerException(); |
1288 |
this.acc = (System.getSecurityManager() == null) |
1289 |
? null |
1290 |
: AccessController.getContext(); |
1291 |
this.corePoolSize = corePoolSize; |
1292 |
this.maximumPoolSize = maximumPoolSize; |
1293 |
this.workQueue = workQueue; |
1294 |
this.keepAliveTime = unit.toNanos(keepAliveTime); |
1295 |
this.threadFactory = threadFactory; |
1296 |
this.handler = handler; |
1297 |
} |
1298 |
|
1299 |
    /**
     * Executes the given task sometime in the future. The task
     * may execute in a new thread or in an existing pooled thread.
     *
     * If the task cannot be submitted for execution, either because this
     * executor has been shutdown or because its capacity has been reached,
     * the task is handled by the current {@code RejectedExecutionHandler}.
     *
     * @param command the task to execute
     * @throws RejectedExecutionException at discretion of
     *         {@code RejectedExecutionHandler}, if the task
     *         cannot be accepted for execution
     * @throws NullPointerException if {@code command} is null
     */
    public void execute(Runnable command) {
        if (command == null)
            throw new NullPointerException();
        /*
         * Proceed in 3 steps:
         *
         * 1. If fewer than corePoolSize threads are running, try to
         * start a new thread with the given command as its first
         * task. The call to addWorker atomically checks runState and
         * workerCount, and so prevents false alarms that would add
         * threads when it shouldn't, by returning false.
         *
         * 2. If a task can be successfully queued, then we still need
         * to double-check whether we should have added a thread
         * (because existing ones died since last checking) or that
         * the pool shut down since entry into this method. So we
         * recheck state and if necessary roll back the enqueuing if
         * stopped, or start a new thread if there are none.
         *
         * 3. If we cannot queue task, then we try to add a new
         * thread. If it fails, we know we are shut down or saturated
         * and so reject the task.
         */
        int c = ctl.get();
        if (workerCountOf(c) < corePoolSize) {
            if (addWorker(command, true))
                return;
            c = ctl.get(); // addWorker failed; re-read state for step 2
        }
        if (isRunning(c) && workQueue.offer(command)) {
            int recheck = ctl.get();
            // Roll back the enqueue if the pool stopped running; only
            // reject if the task was actually still removable.
            if (! isRunning(recheck) && remove(command))
                reject(command);
            else if (workerCountOf(recheck) == 0)
                addWorker(null, false); // ensure at least one worker exists
        }
        else if (!addWorker(command, false))
            reject(command);
    }
1352 |
|
1353 |
/** |
1354 |
* Initiates an orderly shutdown in which previously submitted |
1355 |
* tasks are executed, but no new tasks will be accepted. |
1356 |
* Invocation has no additional effect if already shut down. |
1357 |
* |
1358 |
* <p>This method does not wait for previously submitted tasks to |
1359 |
* complete execution. Use {@link #awaitTermination awaitTermination} |
1360 |
* to do that. |
1361 |
* |
1362 |
* @throws SecurityException {@inheritDoc} |
1363 |
*/ |
1364 |
public void shutdown() { |
1365 |
final ReentrantLock mainLock = this.mainLock; |
1366 |
mainLock.lock(); |
1367 |
try { |
1368 |
checkShutdownAccess(); |
1369 |
advanceRunState(SHUTDOWN); |
1370 |
interruptIdleWorkers(); |
1371 |
onShutdown(); // hook for ScheduledThreadPoolExecutor |
1372 |
} finally { |
1373 |
mainLock.unlock(); |
1374 |
} |
1375 |
tryTerminate(); |
1376 |
} |
1377 |
|
1378 |
/** |
1379 |
* Attempts to stop all actively executing tasks, halts the |
1380 |
* processing of waiting tasks, and returns a list of the tasks |
1381 |
* that were awaiting execution. These tasks are drained (removed) |
1382 |
* from the task queue upon return from this method. |
1383 |
* |
1384 |
* <p>This method does not wait for actively executing tasks to |
1385 |
* terminate. Use {@link #awaitTermination awaitTermination} to |
1386 |
* do that. |
1387 |
* |
1388 |
* <p>There are no guarantees beyond best-effort attempts to stop |
1389 |
* processing actively executing tasks. This implementation |
1390 |
* interrupts tasks via {@link Thread#interrupt}; any task that |
1391 |
* fails to respond to interrupts may never terminate. |
1392 |
* |
1393 |
* @throws SecurityException {@inheritDoc} |
1394 |
*/ |
1395 |
public List<Runnable> shutdownNow() { |
1396 |
List<Runnable> tasks; |
1397 |
final ReentrantLock mainLock = this.mainLock; |
1398 |
mainLock.lock(); |
1399 |
try { |
1400 |
checkShutdownAccess(); |
1401 |
advanceRunState(STOP); |
1402 |
interruptWorkers(); |
1403 |
tasks = drainQueue(); |
1404 |
} finally { |
1405 |
mainLock.unlock(); |
1406 |
} |
1407 |
tryTerminate(); |
1408 |
return tasks; |
1409 |
} |
1410 |
|
1411 |
    /**
     * Returns {@code true} if this executor has been shut down,
     * i.e. its run state has advanced past RUNNING.
     */
    public boolean isShutdown() {
        return ! isRunning(ctl.get());
    }
1414 |
|
1415 |
    /**
     * Returns {@code true} if the run state is at least STOP.
     * Used by ScheduledThreadPoolExecutor.
     */
    boolean isStopped() {
        return runStateAtLeast(ctl.get(), STOP);
    }
1419 |
|
1420 |
/** |
1421 |
* Returns true if this executor is in the process of terminating |
1422 |
* after {@link #shutdown} or {@link #shutdownNow} but has not |
1423 |
* completely terminated. This method may be useful for |
1424 |
* debugging. A return of {@code true} reported a sufficient |
1425 |
* period after shutdown may indicate that submitted tasks have |
1426 |
* ignored or suppressed interruption, causing this executor not |
1427 |
* to properly terminate. |
1428 |
* |
1429 |
* @return {@code true} if terminating but not yet terminated |
1430 |
*/ |
1431 |
public boolean isTerminating() { |
1432 |
int c = ctl.get(); |
1433 |
return ! isRunning(c) && runStateLessThan(c, TERMINATED); |
1434 |
} |
1435 |
|
1436 |
    /**
     * Returns {@code true} if the executor has fully terminated
     * (run state has reached TERMINATED).
     */
    public boolean isTerminated() {
        return runStateAtLeast(ctl.get(), TERMINATED);
    }
1439 |
|
1440 |
public boolean awaitTermination(long timeout, TimeUnit unit) |
1441 |
throws InterruptedException { |
1442 |
long nanos = unit.toNanos(timeout); |
1443 |
final ReentrantLock mainLock = this.mainLock; |
1444 |
mainLock.lock(); |
1445 |
try { |
1446 |
while (!runStateAtLeast(ctl.get(), TERMINATED)) { |
1447 |
if (nanos <= 0L) |
1448 |
return false; |
1449 |
nanos = termination.awaitNanos(nanos); |
1450 |
} |
1451 |
return true; |
1452 |
} finally { |
1453 |
mainLock.unlock(); |
1454 |
} |
1455 |
} |
1456 |
|
1457 |
    /**
     * Invokes {@code shutdown} when this executor is no longer
     * referenced and it has no threads.
     *
     * <p>This method is invoked with privileges that are restricted by
     * the security context of the caller that invokes the constructor.
     */
    protected void finalize() {
        SecurityManager sm = System.getSecurityManager();
        // acc is the constructor caller's context; it is null exactly when
        // no security manager was installed at construction time.
        if (sm == null || acc == null) {
            shutdown();
        } else {
            // Restrict shutdown to the constructing caller's privileges.
            PrivilegedAction<Void> pa = () -> { shutdown(); return null; };
            AccessController.doPrivileged(pa, acc);
        }
    }
1473 |
|
1474 |
/** |
1475 |
* Sets the thread factory used to create new threads. |
1476 |
* |
1477 |
* @param threadFactory the new thread factory |
1478 |
* @throws NullPointerException if threadFactory is null |
1479 |
* @see #getThreadFactory |
1480 |
*/ |
1481 |
public void setThreadFactory(ThreadFactory threadFactory) { |
1482 |
if (threadFactory == null) |
1483 |
throw new NullPointerException(); |
1484 |
this.threadFactory = threadFactory; |
1485 |
} |
1486 |
|
1487 |
/** |
1488 |
* Returns the thread factory used to create new threads. |
1489 |
* |
1490 |
* @return the current thread factory |
1491 |
* @see #setThreadFactory(ThreadFactory) |
1492 |
*/ |
1493 |
public ThreadFactory getThreadFactory() { |
1494 |
return threadFactory; |
1495 |
} |
1496 |
|
1497 |
/** |
1498 |
* Sets a new handler for unexecutable tasks. |
1499 |
* |
1500 |
* @param handler the new handler |
1501 |
* @throws NullPointerException if handler is null |
1502 |
* @see #getRejectedExecutionHandler |
1503 |
*/ |
1504 |
public void setRejectedExecutionHandler(RejectedExecutionHandler handler) { |
1505 |
if (handler == null) |
1506 |
throw new NullPointerException(); |
1507 |
this.handler = handler; |
1508 |
} |
1509 |
|
1510 |
/** |
1511 |
* Returns the current handler for unexecutable tasks. |
1512 |
* |
1513 |
* @return the current handler |
1514 |
* @see #setRejectedExecutionHandler(RejectedExecutionHandler) |
1515 |
*/ |
1516 |
public RejectedExecutionHandler getRejectedExecutionHandler() { |
1517 |
return handler; |
1518 |
} |
1519 |
|
1520 |
    /**
     * Sets the core number of threads. This overrides any value set
     * in the constructor. If the new value is smaller than the
     * current value, excess existing threads will be terminated when
     * they next become idle. If larger, new threads will, if needed,
     * be started to execute any queued tasks.
     *
     * @param corePoolSize the new core size
     * @throws IllegalArgumentException if {@code corePoolSize < 0}
     * or {@code corePoolSize} is greater than the {@linkplain
     * #getMaximumPoolSize() maximum pool size}
     * @see #getCorePoolSize
     */
    public void setCorePoolSize(int corePoolSize) {
        if (corePoolSize < 0 || maximumPoolSize < corePoolSize)
            throw new IllegalArgumentException();
        int delta = corePoolSize - this.corePoolSize;
        // Publish the new value before acting on it, so concurrent
        // workers observe the updated core size.
        this.corePoolSize = corePoolSize;
        if (workerCountOf(ctl.get()) > corePoolSize)
            // Shrinking: nudge idle workers to exit so the pool drifts
            // down to the new core size.
            interruptIdleWorkers();
        else if (delta > 0) {
            // We don't really know how many new threads are "needed".
            // As a heuristic, prestart enough new workers (up to new
            // core size) to handle the current number of tasks in
            // queue, but stop if queue becomes empty while doing so.
            int k = Math.min(delta, workQueue.size());
            while (k-- > 0 && addWorker(null, true)) {
                if (workQueue.isEmpty())
                    break;
            }
        }
    }
1552 |
|
1553 |
/** |
1554 |
* Returns the core number of threads. |
1555 |
* |
1556 |
* @return the core number of threads |
1557 |
* @see #setCorePoolSize |
1558 |
*/ |
1559 |
public int getCorePoolSize() { |
1560 |
return corePoolSize; |
1561 |
} |
1562 |
|
1563 |
/** |
1564 |
* Starts a core thread, causing it to idly wait for work. This |
1565 |
* overrides the default policy of starting core threads only when |
1566 |
* new tasks are executed. This method will return {@code false} |
1567 |
* if all core threads have already been started. |
1568 |
* |
1569 |
* @return {@code true} if a thread was started |
1570 |
*/ |
1571 |
public boolean prestartCoreThread() { |
1572 |
return workerCountOf(ctl.get()) < corePoolSize && |
1573 |
addWorker(null, true); |
1574 |
} |
1575 |
|
1576 |
/** |
1577 |
* Same as prestartCoreThread except arranges that at least one |
1578 |
* thread is started even if corePoolSize is 0. |
1579 |
*/ |
1580 |
void ensurePrestart() { |
1581 |
int wc = workerCountOf(ctl.get()); |
1582 |
if (wc < corePoolSize) |
1583 |
addWorker(null, true); |
1584 |
else if (wc == 0) |
1585 |
addWorker(null, false); |
1586 |
} |
1587 |
|
1588 |
/** |
1589 |
* Starts all core threads, causing them to idly wait for work. This |
1590 |
* overrides the default policy of starting core threads only when |
1591 |
* new tasks are executed. |
1592 |
* |
1593 |
* @return the number of threads started |
1594 |
*/ |
1595 |
public int prestartAllCoreThreads() { |
1596 |
int n = 0; |
1597 |
while (addWorker(null, true)) |
1598 |
++n; |
1599 |
return n; |
1600 |
} |
1601 |
|
1602 |
/** |
1603 |
* Returns true if this pool allows core threads to time out and |
1604 |
* terminate if no tasks arrive within the keepAlive time, being |
1605 |
* replaced if needed when new tasks arrive. When true, the same |
1606 |
* keep-alive policy applying to non-core threads applies also to |
1607 |
* core threads. When false (the default), core threads are never |
1608 |
* terminated due to lack of incoming tasks. |
1609 |
* |
1610 |
* @return {@code true} if core threads are allowed to time out, |
1611 |
* else {@code false} |
1612 |
* |
1613 |
* @since 1.6 |
1614 |
*/ |
1615 |
public boolean allowsCoreThreadTimeOut() { |
1616 |
return allowCoreThreadTimeOut; |
1617 |
} |
1618 |
|
1619 |
    /**
     * Sets the policy governing whether core threads may time out and
     * terminate if no tasks arrive within the keep-alive time, being
     * replaced if needed when new tasks arrive. When false, core
     * threads are never terminated due to lack of incoming
     * tasks. When true, the same keep-alive policy applying to
     * non-core threads applies also to core threads. To avoid
     * continual thread replacement, the keep-alive time must be
     * greater than zero when setting {@code true}. This method
     * should in general be called before the pool is actively used.
     *
     * @param value {@code true} if should time out, else {@code false}
     * @throws IllegalArgumentException if value is {@code true}
     * and the current keep-alive time is not greater than zero
     *
     * @since 1.6
     */
    public void allowCoreThreadTimeOut(boolean value) {
        if (value && keepAliveTime <= 0)
            throw new IllegalArgumentException("Core threads must have nonzero keep alive times");
        if (value != allowCoreThreadTimeOut) {
            // Write the flag before waking workers so idle core threads
            // re-check timeout behavior under the new policy.
            allowCoreThreadTimeOut = value;
            if (value)
                // Wake idle workers so newly-timeout-eligible core
                // threads can begin their timed waits.
                interruptIdleWorkers();
        }
    }
1645 |
|
1646 |
    /**
     * Sets the maximum allowed number of threads. This overrides any
     * value set in the constructor. If the new value is smaller than
     * the current value, excess existing threads will be
     * terminated when they next become idle.
     *
     * @param maximumPoolSize the new maximum
     * @throws IllegalArgumentException if the new maximum is
     * less than or equal to zero, or
     * less than the {@linkplain #getCorePoolSize core pool size}
     * @see #getMaximumPoolSize
     */
    public void setMaximumPoolSize(int maximumPoolSize) {
        if (maximumPoolSize <= 0 || maximumPoolSize < corePoolSize)
            throw new IllegalArgumentException();
        // Publish the new bound first, then shed excess workers: idle
        // threads re-check the bound when interrupted.
        this.maximumPoolSize = maximumPoolSize;
        if (workerCountOf(ctl.get()) > maximumPoolSize)
            interruptIdleWorkers();
    }
1665 |
|
1666 |
/** |
1667 |
* Returns the maximum allowed number of threads. |
1668 |
* |
1669 |
* @return the maximum allowed number of threads |
1670 |
* @see #setMaximumPoolSize |
1671 |
*/ |
1672 |
public int getMaximumPoolSize() { |
1673 |
return maximumPoolSize; |
1674 |
} |
1675 |
|
1676 |
    /**
     * Sets the thread keep-alive time, which is the amount of time
     * that threads may remain idle before being terminated.
     * Threads that wait this amount of time without processing a
     * task will be terminated if there are more than the core
     * number of threads currently in the pool, or if this pool
     * {@linkplain #allowsCoreThreadTimeOut() allows core thread timeout}.
     * This overrides any value set in the constructor.
     *
     * @param time the time to wait. A time value of zero will cause
     * excess threads to terminate immediately after executing tasks.
     * @param unit the time unit of the {@code time} argument
     * @throws IllegalArgumentException if {@code time} less than zero or
     * if {@code time} is zero and {@code allowsCoreThreadTimeOut}
     * @see #getKeepAliveTime(TimeUnit)
     */
    public void setKeepAliveTime(long time, TimeUnit unit) {
        if (time < 0)
            throw new IllegalArgumentException();
        if (time == 0 && allowsCoreThreadTimeOut())
            throw new IllegalArgumentException("Core threads must have nonzero keep alive times");
        // Keep-alive is stored internally in nanoseconds.
        long keepAliveTime = unit.toNanos(time);
        long delta = keepAliveTime - this.keepAliveTime;
        this.keepAliveTime = keepAliveTime;
        if (delta < 0)
            // Timeout shortened: wake idle workers so they re-wait
            // using the new, smaller timeout.
            interruptIdleWorkers();
    }
1703 |
|
1704 |
/** |
1705 |
* Returns the thread keep-alive time, which is the amount of time |
1706 |
* that threads may remain idle before being terminated. |
1707 |
* Threads that wait this amount of time without processing a |
1708 |
* task will be terminated if there are more than the core |
1709 |
* number of threads currently in the pool, or if this pool |
1710 |
* {@linkplain #allowsCoreThreadTimeOut() allows core thread timeout}. |
1711 |
* |
1712 |
* @param unit the desired time unit of the result |
1713 |
* @return the time limit |
1714 |
* @see #setKeepAliveTime(long, TimeUnit) |
1715 |
*/ |
1716 |
public long getKeepAliveTime(TimeUnit unit) { |
1717 |
return unit.convert(keepAliveTime, TimeUnit.NANOSECONDS); |
1718 |
} |
1719 |
|
1720 |
/* User-level queue utilities */ |
1721 |
|
1722 |
/** |
1723 |
* Returns the task queue used by this executor. Access to the |
1724 |
* task queue is intended primarily for debugging and monitoring. |
1725 |
* This queue may be in active use. Retrieving the task queue |
1726 |
* does not prevent queued tasks from executing. |
1727 |
* |
1728 |
* @return the task queue |
1729 |
*/ |
1730 |
public BlockingQueue<Runnable> getQueue() { |
1731 |
return workQueue; |
1732 |
} |
1733 |
|
1734 |
/** |
1735 |
* Removes this task from the executor's internal queue if it is |
1736 |
* present, thus causing it not to be run if it has not already |
1737 |
* started. |
1738 |
* |
1739 |
* <p>This method may be useful as one part of a cancellation |
1740 |
* scheme. It may fail to remove tasks that have been converted |
1741 |
* into other forms before being placed on the internal queue. |
1742 |
* For example, a task entered using {@code submit} might be |
1743 |
* converted into a form that maintains {@code Future} status. |
1744 |
* However, in such cases, method {@link #purge} may be used to |
1745 |
* remove those Futures that have been cancelled. |
1746 |
* |
1747 |
* @param task the task to remove |
1748 |
* @return {@code true} if the task was removed |
1749 |
*/ |
1750 |
public boolean remove(Runnable task) { |
1751 |
boolean removed = workQueue.remove(task); |
1752 |
tryTerminate(); // In case SHUTDOWN and now empty |
1753 |
return removed; |
1754 |
} |
1755 |
|
1756 |
    /**
     * Tries to remove from the work queue all {@link Future}
     * tasks that have been cancelled. This method can be useful as a
     * storage reclamation operation, that has no other impact on
     * functionality. Cancelled tasks are never executed, but may
     * accumulate in work queues until worker threads can actively
     * remove them. Invoking this method instead tries to remove them now.
     * However, this method may fail to remove tasks in
     * the presence of interference by other threads.
     */
    public void purge() {
        final BlockingQueue<Runnable> q = workQueue;
        try {
            // Fast path: traverse in place, removing cancelled Futures
            // via the iterator.
            Iterator<Runnable> it = q.iterator();
            while (it.hasNext()) {
                Runnable r = it.next();
                if (r instanceof Future<?> && ((Future<?>)r).isCancelled())
                    it.remove();
            }
        } catch (ConcurrentModificationException fallThrough) {
            // Take slow path if we encounter interference during traversal.
            // Make copy for traversal and call remove for cancelled entries.
            // The slow path is more likely to be O(N*N).
            for (Object r : q.toArray())
                if (r instanceof Future<?> && ((Future<?>)r).isCancelled())
                    q.remove(r);
        }

        tryTerminate(); // In case SHUTDOWN and now empty
    }
1786 |
|
1787 |
/* Statistics */ |
1788 |
|
1789 |
/** |
1790 |
* Returns the current number of threads in the pool. |
1791 |
* |
1792 |
* @return the number of threads |
1793 |
*/ |
1794 |
public int getPoolSize() { |
1795 |
final ReentrantLock mainLock = this.mainLock; |
1796 |
mainLock.lock(); |
1797 |
try { |
1798 |
// Remove rare and surprising possibility of |
1799 |
// isTerminated() && getPoolSize() > 0 |
1800 |
return runStateAtLeast(ctl.get(), TIDYING) ? 0 |
1801 |
: workers.size(); |
1802 |
} finally { |
1803 |
mainLock.unlock(); |
1804 |
} |
1805 |
} |
1806 |
|
1807 |
/** |
1808 |
* Returns the approximate number of threads that are actively |
1809 |
* executing tasks. |
1810 |
* |
1811 |
* @return the number of threads |
1812 |
*/ |
1813 |
public int getActiveCount() { |
1814 |
final ReentrantLock mainLock = this.mainLock; |
1815 |
mainLock.lock(); |
1816 |
try { |
1817 |
int n = 0; |
1818 |
for (Worker w : workers) |
1819 |
if (w.isLocked()) |
1820 |
++n; |
1821 |
return n; |
1822 |
} finally { |
1823 |
mainLock.unlock(); |
1824 |
} |
1825 |
} |
1826 |
|
1827 |
/** |
1828 |
* Returns the largest number of threads that have ever |
1829 |
* simultaneously been in the pool. |
1830 |
* |
1831 |
* @return the number of threads |
1832 |
*/ |
1833 |
public int getLargestPoolSize() { |
1834 |
final ReentrantLock mainLock = this.mainLock; |
1835 |
mainLock.lock(); |
1836 |
try { |
1837 |
return largestPoolSize; |
1838 |
} finally { |
1839 |
mainLock.unlock(); |
1840 |
} |
1841 |
} |
1842 |
|
1843 |
/** |
1844 |
* Returns the approximate total number of tasks that have ever been |
1845 |
* scheduled for execution. Because the states of tasks and |
1846 |
* threads may change dynamically during computation, the returned |
1847 |
* value is only an approximation. |
1848 |
* |
1849 |
* @return the number of tasks |
1850 |
*/ |
1851 |
public long getTaskCount() { |
1852 |
final ReentrantLock mainLock = this.mainLock; |
1853 |
mainLock.lock(); |
1854 |
try { |
1855 |
long n = completedTaskCount; |
1856 |
for (Worker w : workers) { |
1857 |
n += w.completedTasks; |
1858 |
if (w.isLocked()) |
1859 |
++n; |
1860 |
} |
1861 |
return n + workQueue.size(); |
1862 |
} finally { |
1863 |
mainLock.unlock(); |
1864 |
} |
1865 |
} |
1866 |
|
1867 |
/** |
1868 |
* Returns the approximate total number of tasks that have |
1869 |
* completed execution. Because the states of tasks and threads |
1870 |
* may change dynamically during computation, the returned value |
1871 |
* is only an approximation, but one that does not ever decrease |
1872 |
* across successive calls. |
1873 |
* |
1874 |
* @return the number of tasks |
1875 |
*/ |
1876 |
public long getCompletedTaskCount() { |
1877 |
final ReentrantLock mainLock = this.mainLock; |
1878 |
mainLock.lock(); |
1879 |
try { |
1880 |
long n = completedTaskCount; |
1881 |
for (Worker w : workers) |
1882 |
n += w.completedTasks; |
1883 |
return n; |
1884 |
} finally { |
1885 |
mainLock.unlock(); |
1886 |
} |
1887 |
} |
1888 |
|
1889 |
/** |
1890 |
* Returns a string identifying this pool, as well as its state, |
1891 |
* including indications of run state and estimated worker and |
1892 |
* task counts. |
1893 |
* |
1894 |
* @return a string identifying this pool, as well as its state |
1895 |
*/ |
1896 |
public String toString() { |
1897 |
long ncompleted; |
1898 |
int nworkers, nactive; |
1899 |
final ReentrantLock mainLock = this.mainLock; |
1900 |
mainLock.lock(); |
1901 |
try { |
1902 |
ncompleted = completedTaskCount; |
1903 |
nactive = 0; |
1904 |
nworkers = workers.size(); |
1905 |
for (Worker w : workers) { |
1906 |
ncompleted += w.completedTasks; |
1907 |
if (w.isLocked()) |
1908 |
++nactive; |
1909 |
} |
1910 |
} finally { |
1911 |
mainLock.unlock(); |
1912 |
} |
1913 |
int c = ctl.get(); |
1914 |
String runState = |
1915 |
runStateLessThan(c, SHUTDOWN) ? "Running" : |
1916 |
runStateAtLeast(c, TERMINATED) ? "Terminated" : |
1917 |
"Shutting down"; |
1918 |
return super.toString() + |
1919 |
"[" + runState + |
1920 |
", pool size = " + nworkers + |
1921 |
", active threads = " + nactive + |
1922 |
", queued tasks = " + workQueue.size() + |
1923 |
", completed tasks = " + ncompleted + |
1924 |
"]"; |
1925 |
} |
1926 |
|
1927 |
/* Extension hooks */ |
1928 |
|
1929 |
/** |
1930 |
* Method invoked prior to executing the given Runnable in the |
1931 |
* given thread. This method is invoked by thread {@code t} that |
1932 |
* will execute task {@code r}, and may be used to re-initialize |
1933 |
* ThreadLocals, or to perform logging. |
1934 |
* |
1935 |
* <p>This implementation does nothing, but may be customized in |
1936 |
* subclasses. Note: To properly nest multiple overridings, subclasses |
1937 |
* should generally invoke {@code super.beforeExecute} at the end of |
1938 |
* this method. |
1939 |
* |
1940 |
* @param t the thread that will run task {@code r} |
1941 |
* @param r the task that will be executed |
1942 |
*/ |
1943 |
protected void beforeExecute(Thread t, Runnable r) { } |
1944 |
|
1945 |
    /**
     * Method invoked upon completion of execution of the given Runnable.
     * This method is invoked by the thread that executed the task. If
     * non-null, the Throwable is the uncaught {@code RuntimeException}
     * or {@code Error} that caused execution to terminate abruptly.
     *
     * <p>This implementation does nothing, but may be customized in
     * subclasses. Note: To properly nest multiple overridings, subclasses
     * should generally invoke {@code super.afterExecute} at the
     * beginning of this method.
     *
     * <p><b>Note:</b> When actions are enclosed in tasks (such as
     * {@link FutureTask}) either explicitly or via methods such as
     * {@code submit}, these task objects catch and maintain
     * computational exceptions, and so they do not cause abrupt
     * termination, and the internal exceptions are <em>not</em>
     * passed to this method. If you would like to trap both kinds of
     * failures in this method, you can further probe for such cases,
     * as in this sample subclass that prints either the direct cause
     * or the underlying exception if a task has been aborted:
     *
     * <pre> {@code
     * class ExtendedExecutor extends ThreadPoolExecutor {
     *   // ...
     *   protected void afterExecute(Runnable r, Throwable t) {
     *     super.afterExecute(r, t);
     *     if (t == null
     *         && r instanceof Future<?>
     *         && ((Future<?>)r).isDone()) {
     *       try {
     *         Object result = ((Future<?>) r).get();
     *       } catch (CancellationException ce) {
     *         t = ce;
     *       } catch (ExecutionException ee) {
     *         t = ee.getCause();
     *       } catch (InterruptedException ie) {
     *         // ignore/reset
     *         Thread.currentThread().interrupt();
     *       }
     *     }
     *     if (t != null)
     *       System.out.println(t);
     *   }
     * }}</pre>
     *
     * @param r the runnable that has completed
     * @param t the exception that caused termination, or null if
     * execution completed normally
     */
    protected void afterExecute(Runnable r, Throwable t) { }
1995 |
|
1996 |
/** |
1997 |
* Method invoked when the Executor has terminated. Default |
1998 |
* implementation does nothing. Note: To properly nest multiple |
1999 |
* overridings, subclasses should generally invoke |
2000 |
* {@code super.terminated} within this method. |
2001 |
*/ |
2002 |
protected void terminated() { } |
2003 |
|
2004 |
/* Predefined RejectedExecutionHandlers */ |
2005 |
|
2006 |
/** |
2007 |
* A handler for rejected tasks that runs the rejected task |
2008 |
* directly in the calling thread of the {@code execute} method, |
2009 |
* unless the executor has been shut down, in which case the task |
2010 |
* is discarded. |
2011 |
*/ |
2012 |
public static class CallerRunsPolicy implements RejectedExecutionHandler { |
2013 |
/** |
2014 |
* Creates a {@code CallerRunsPolicy}. |
2015 |
*/ |
2016 |
public CallerRunsPolicy() { } |
2017 |
|
2018 |
/** |
2019 |
* Executes task r in the caller's thread, unless the executor |
2020 |
* has been shut down, in which case the task is discarded. |
2021 |
* |
2022 |
* @param r the runnable task requested to be executed |
2023 |
* @param e the executor attempting to execute this task |
2024 |
*/ |
2025 |
public void rejectedExecution(Runnable r, ThreadPoolExecutor e) { |
2026 |
if (!e.isShutdown()) { |
2027 |
r.run(); |
2028 |
} |
2029 |
} |
2030 |
} |
2031 |
|
2032 |
/** |
2033 |
* A handler for rejected tasks that throws a |
2034 |
* {@link RejectedExecutionException}. |
2035 |
* |
2036 |
* This is the default handler for {@link ThreadPoolExecutor} and |
2037 |
* {@link ScheduledThreadPoolExecutor}. |
2038 |
*/ |
2039 |
public static class AbortPolicy implements RejectedExecutionHandler { |
2040 |
/** |
2041 |
* Creates an {@code AbortPolicy}. |
2042 |
*/ |
2043 |
public AbortPolicy() { } |
2044 |
|
2045 |
/** |
2046 |
* Always throws RejectedExecutionException. |
2047 |
* |
2048 |
* @param r the runnable task requested to be executed |
2049 |
* @param e the executor attempting to execute this task |
2050 |
* @throws RejectedExecutionException always |
2051 |
*/ |
2052 |
public void rejectedExecution(Runnable r, ThreadPoolExecutor e) { |
2053 |
throw new RejectedExecutionException("Task " + r.toString() + |
2054 |
" rejected from " + |
2055 |
e.toString()); |
2056 |
} |
2057 |
} |
2058 |
|
2059 |
/** |
2060 |
* A handler for rejected tasks that silently discards the |
2061 |
* rejected task. |
2062 |
*/ |
2063 |
public static class DiscardPolicy implements RejectedExecutionHandler { |
2064 |
/** |
2065 |
* Creates a {@code DiscardPolicy}. |
2066 |
*/ |
2067 |
public DiscardPolicy() { } |
2068 |
|
2069 |
/** |
2070 |
* Does nothing, which has the effect of discarding task r. |
2071 |
* |
2072 |
* @param r the runnable task requested to be executed |
2073 |
* @param e the executor attempting to execute this task |
2074 |
*/ |
2075 |
public void rejectedExecution(Runnable r, ThreadPoolExecutor e) { |
2076 |
} |
2077 |
} |
2078 |
|
2079 |
    /**
     * A handler for rejected tasks that discards the oldest unhandled
     * request and then retries {@code execute}, unless the executor
     * is shut down, in which case the task is discarded.
     */
    public static class DiscardOldestPolicy implements RejectedExecutionHandler {
        /**
         * Creates a {@code DiscardOldestPolicy}.
         */
        public DiscardOldestPolicy() { }

        /**
         * Obtains and ignores the next task that the executor
         * would otherwise execute, if one is immediately available,
         * and then retries execution of task r, unless the executor
         * is shut down, in which case task r is instead discarded.
         *
         * @param r the runnable task requested to be executed
         * @param e the executor attempting to execute this task
         */
        public void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
            if (!e.isShutdown()) {
                // Drop the head of the queue (the oldest waiting task),
                // then resubmit; this may recurse into this handler if
                // the queue fills again.
                e.getQueue().poll();
                e.execute(r);
            }
        }
    }
2106 |
} |