root/jsr166/jsr166/src/jsr166e/ForkJoinPool.java
Revision: 1.19
Committed: Tue Nov 20 05:18:42 2012 UTC (11 years, 5 months ago) by jsr166
Branch: MAIN
Changes since 1.18: +1 -1 lines
Log Message:
whitespace

File Contents

1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/publicdomain/zero/1.0/
5 */
6
7 package jsr166e;
8
9 import java.util.ArrayList;
10 import java.util.Arrays;
11 import java.util.Collection;
12 import java.util.Collections;
13 import java.util.List;
14 import java.util.concurrent.AbstractExecutorService;
15 import java.util.concurrent.Callable;
16 import java.util.concurrent.ExecutorService;
17 import java.util.concurrent.Future;
18 import java.util.concurrent.RejectedExecutionException;
19 import java.util.concurrent.RunnableFuture;
20 import java.util.concurrent.TimeUnit;
21
22 /**
23 * An {@link ExecutorService} for running {@link ForkJoinTask}s.
24 * A {@code ForkJoinPool} provides the entry point for submissions
25 * from non-{@code ForkJoinTask} clients, as well as management and
26 * monitoring operations.
27 *
28 * <p>A {@code ForkJoinPool} differs from other kinds of {@link
29 * ExecutorService} mainly by virtue of employing
30 * <em>work-stealing</em>: all threads in the pool attempt to find and
31 * execute tasks submitted to the pool and/or created by other active
32 * tasks (eventually blocking waiting for work if none exist). This
33 * enables efficient processing when most tasks spawn other subtasks
34 * (as do most {@code ForkJoinTask}s), as well as when many small
35 * tasks are submitted to the pool from external clients. Especially
36 * when setting <em>asyncMode</em> to true in constructors, {@code
37 * ForkJoinPool}s may also be appropriate for use with event-style
38 * tasks that are never joined.
39 *
40 * <p>A static {@link #commonPool()} is available and appropriate for
41 * most applications. The common pool is used by any ForkJoinTask that
42 * is not explicitly submitted to a specified pool. Using the common
43 * pool normally reduces resource usage (its threads are slowly
44 * reclaimed during periods of non-use, and reinstated upon subsequent
45 * use).
46 *
47 * <p>For applications that require separate or custom pools, a {@code
48 * ForkJoinPool} may be constructed with a given target parallelism
49 * level; by default, equal to the number of available processors. The
50 * pool attempts to maintain enough active (or available) threads by
51 * dynamically adding, suspending, or resuming internal worker
52 * threads, even if some tasks are stalled waiting to join
53 * others. However, no such adjustments are guaranteed in the face of
54 * blocked IO or other unmanaged synchronization. The nested {@link
55 * ManagedBlocker} interface enables extension of the kinds of
56 * synchronization accommodated.
57 *
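 * <p>As an illustration (an editorial sketch, not part of the original
 * documentation), a task that must block on a lock can route the wait
 * through a {@link ManagedBlocker} so the pool can compensate for the
 * blocked worker. The class name {@code ManagedLocker} is hypothetical,
 * and {@code java.util.concurrent.locks.ReentrantLock} is assumed to be
 * imported:
 *
 * <pre> {@code
 * class ManagedLocker implements ForkJoinPool.ManagedBlocker {
 *   final ReentrantLock lock;
 *   boolean hasLock = false;
 *   ManagedLocker(ReentrantLock lock) { this.lock = lock; }
 *   public boolean block() {
 *     if (!hasLock)
 *       lock.lock();       // actually block; the pool may add a spare thread
 *     return true;         // done blocking
 *   }
 *   public boolean isReleasable() {
 *     return hasLock || (hasLock = lock.tryLock());  // skip blocking if free
 *   }
 * }}</pre>
 *
 * Such a blocker would then be passed to {@code ForkJoinPool.managedBlock}.
 *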
58 * <p>In addition to execution and lifecycle control methods, this
59 * class provides status check methods (for example
60 * {@link #getStealCount}) that are intended to aid in developing,
61 * tuning, and monitoring fork/join applications. Also, method
62 * {@link #toString} returns indications of pool state in a
63 * convenient form for informal monitoring.
64 *
65 * <p>As is the case with other ExecutorServices, there are three
66 * main task execution methods summarized in the following table.
67 * These are designed to be used primarily by clients not already
68 * engaged in fork/join computations in the current pool. The main
69 * forms of these methods accept instances of {@code ForkJoinTask},
70 * but overloaded forms also allow mixed execution of plain {@code
71 * Runnable}- or {@code Callable}- based activities as well. However,
72 * tasks that are already executing in a pool should normally instead
73 * use the within-computation forms listed in the table unless using
74 * async event-style tasks that are not usually joined, in which case
75 * there is little difference among choice of methods.
76 *
77 * <table BORDER CELLPADDING=3 CELLSPACING=1>
78 * <tr>
79 * <td></td>
80 * <td ALIGN=CENTER> <b>Call from non-fork/join clients</b></td>
81 * <td ALIGN=CENTER> <b>Call from within fork/join computations</b></td>
82 * </tr>
83 * <tr>
84 * <td> <b>Arrange async execution</b></td>
85 * <td> {@link #execute(ForkJoinTask)}</td>
86 * <td> {@link ForkJoinTask#fork}</td>
87 * </tr>
88 * <tr>
89 * <td> <b>Await and obtain result</b></td>
90 * <td> {@link #invoke(ForkJoinTask)}</td>
91 * <td> {@link ForkJoinTask#invoke}</td>
92 * </tr>
93 * <tr>
94 * <td> <b>Arrange exec and obtain Future</b></td>
95 * <td> {@link #submit(ForkJoinTask)}</td>
96 * <td> {@link ForkJoinTask#fork} (ForkJoinTasks <em>are</em> Futures)</td>
97 * </tr>
98 * </table>
99 *
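 * <p>For instance (an editorial sketch, not part of the original
 * documentation), a client outside any fork/join computation could use
 * the three forms as follows, where {@code SortTask} stands for a
 * hypothetical {@code RecursiveAction} subclass and {@code data} for its
 * input:
 *
 * <pre> {@code
 * ForkJoinPool pool = new ForkJoinPool();        // or ForkJoinPool.commonPool()
 * pool.execute(new SortTask(data));              // arrange async execution
 * pool.invoke(new SortTask(data));               // await and obtain result
 * ForkJoinTask<Void> job = pool.submit(new SortTask(data)); // obtain a Future
 * job.join();
 * }</pre>
 *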
100 * <p>The common pool is by default constructed with default
101 * parameters, but these may be controlled by setting three {@link
102 * System#getProperty properties} with prefix {@code
103 * java.util.concurrent.ForkJoinPool.common}: {@code parallelism} --
104 * an integer greater than zero, {@code threadFactory} -- the class
105 * name of a {@link ForkJoinWorkerThreadFactory}, and {@code
109 * exceptionHandler} -- the class name of a {@link
110 * java.lang.Thread.UncaughtExceptionHandler
112 * Thread.UncaughtExceptionHandler}. Upon any error in establishing
113 * these settings, default parameters are used.
114 *
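 * <p>For example (an editorial illustration, not part of the original
 * documentation), the common pool's target parallelism could be set on
 * the JVM command line, before this class is first used:
 *
 * <pre> {@code
 * -Djava.util.concurrent.ForkJoinPool.common.parallelism=8
 * }</pre>
 *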
115 * <p><b>Implementation notes</b>: This implementation restricts the
116 * maximum number of running threads to 32767. Attempts to create
117 * pools with greater than the maximum number result in
118 * {@code IllegalArgumentException}.
119 *
120 * <p>This implementation rejects submitted tasks (that is, by throwing
121 * {@link RejectedExecutionException}) only when the pool is shut down
122 * or internal resources have been exhausted.
123 *
124 * @since 1.7
125 * @author Doug Lea
126 */
127 public class ForkJoinPool extends AbstractExecutorService {
128
129 /*
130 * Implementation Overview
131 *
132 * This class and its nested classes provide the main
133 * functionality and control for a set of worker threads:
134 * Submissions from non-FJ threads enter into submission queues.
135 * Workers take these tasks and typically split them into subtasks
136 * that may be stolen by other workers. Preference rules give
137 * first priority to processing tasks from their own queues (LIFO
138 * or FIFO, depending on mode), then to randomized FIFO steals of
139 * tasks in other queues.
140 *
141 * WorkQueues
142 * ==========
143 *
144 * Most operations occur within work-stealing queues (in nested
145 * class WorkQueue). These are special forms of Deques that
146 * support only three of the four possible end-operations -- push,
147 * pop, and poll (aka steal), under the further constraints that
148 * push and pop are called only from the owning thread (or, as
149 * extended here, under a lock), while poll may be called from
150 * other threads. (If you are unfamiliar with them, you probably
151 * want to read Herlihy and Shavit's book "The Art of
152 * Multiprocessor Programming", chapter 16 describing these in
153 * more detail before proceeding.) The main work-stealing queue
154 * design is roughly similar to those in the papers "Dynamic
155 * Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005
156 * (http://research.sun.com/scalable/pubs/index.html) and
157 * "Idempotent work stealing" by Michael, Saraswat, and Vechev,
158 * PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186).
159 * The main differences ultimately stem from GC requirements that
160 * we null out taken slots as soon as we can, to maintain as small
161 * a footprint as possible even in programs generating huge
162 * numbers of tasks. To accomplish this, we shift the CAS
163 * arbitrating pop vs poll (steal) from being on the indices
164 * ("base" and "top") to the slots themselves. So, both a
165 * successful pop and poll mainly entail a CAS of a slot from
166 * non-null to null. Because we rely on CASes of references, we
167 * do not need tag bits on base or top. They are simple ints as
168 * used in any circular array-based queue (see for example
169 * ArrayDeque). Updates to the indices must still be ordered in a
170 * way that guarantees that top == base means the queue is empty,
171 * but otherwise may err on the side of possibly making the queue
172 * appear nonempty when a push, pop, or poll has not fully
173 * committed. Note that this means that the poll operation,
174 * considered individually, is not wait-free. One thief cannot
175 * successfully continue until another in-progress one (or, if
176 * previously empty, a push) completes. However, in the
177 * aggregate, we ensure at least probabilistic non-blockingness.
178 * If an attempted steal fails, a thief always chooses a different
179 * random victim target to try next. So, in order for one thief to
180 * progress, it suffices for any in-progress poll or new push on
181 * any empty queue to complete. (This is why we normally use
182 * method pollAt and its variants that try once at the apparent
183 * base index, else consider alternative actions, rather than
184 * method poll.)
185 *
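 * (Editorial sketch, not part of the original comment: stripped of the
 * Unsafe mechanics used in WorkQueue below, a successful pop or poll
 * reduces to
 *
 *   pop  (owner): t = a[(top - 1) & mask]; if (CAS(slot, t, null)) top = top - 1;
 *   poll (thief): t = a[base & mask];      if (CAS(slot, t, null)) base = base + 1;
 *
 * where CAS(slot, t, null) stands for the per-slot compare-and-swap and
 * the index write is ordered after the slot CAS.)
 *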
186 * This approach also enables support of a user mode in which local
187 * task processing is in FIFO, not LIFO order, simply by using
188 * poll rather than pop. This can be useful in message-passing
189 * frameworks in which tasks are never joined. However neither
190 * mode considers affinities, loads, cache localities, etc., so
191 * rarely provides the best possible performance on a given
192 * machine, but portably provides good throughput by averaging over
193 * these factors. (Further, even if we did try to use such
194 * information, we do not usually have a basis for exploiting it.
195 * For example, some sets of tasks profit from cache affinities,
196 * but others are harmed by cache pollution effects.)
197 *
198 * WorkQueues are also used in a similar way for tasks submitted
199 * to the pool. We cannot mix these tasks in the same queues used
200 * for work-stealing (this would contaminate lifo/fifo
201 * processing). Instead, we randomly associate submission queues
202 * with submitting threads, using a form of hashing. The
203 * ThreadLocal Submitter class contains a value initially used as
204 * a hash code for choosing existing queues, but may be randomly
205 * repositioned upon contention with other submitters. In
206 * essence, submitters act like workers except that they are
207 * restricted to executing local tasks that they submitted (or in
208 * the case of CountedCompleters, others with the same root task).
209 * However, because most shared/external queue operations are more
210 * expensive than internal, and because, at steady state, external
211 * submitters will compete for CPU with workers, ForkJoinTask.join
212 * and related methods disable them from repeatedly helping to
213 * process tasks if all workers are active. Insertion of tasks in
214 * shared mode requires a lock (mainly to protect in the case of
215 * resizing) but we use only a simple spinlock (using bits in
216 * field qlock), because submitters encountering a busy queue move
217 * on to try or create other queues -- they block only when
218 * creating and registering new queues.
219 *
220 * Management
221 * ==========
222 *
223 * The main throughput advantages of work-stealing stem from
224 * decentralized control -- workers mostly take tasks from
225 * themselves or each other. We cannot negate this in the
226 * implementation of other management responsibilities. The main
227 * tactic for avoiding bottlenecks is packing nearly all
228 * essentially atomic control state into two volatile variables
229 * that are by far most often read (not written) as status and
230 * consistency checks.
231 *
232 * Field "ctl" contains 64 bits holding all the information needed
233 * to atomically decide to add, inactivate, enqueue (on an event
234 * queue), dequeue, and/or re-activate workers. To enable this
235 * packing, we restrict maximum parallelism to (1<<15)-1 (which is
236 * far in excess of normal operating range) to allow ids, counts,
237 * and their negations (used for thresholding) to fit into 16bit
238 * fields.
239 *
240 * Field "plock" is a form of sequence lock with a saturating
241 * shutdown bit (similarly for per-queue "qlocks"), mainly
242 * protecting updates to the workQueues array, as well as to
243 * enable shutdown. When used as a lock, it is normally only very
244 * briefly held, so is nearly always available after at most a
245 * brief spin, but we use a monitor-based backup strategy to
246 * block when needed.
247 *
248 * Recording WorkQueues. WorkQueues are recorded in the
249 * "workQueues" array that is created upon first use and expanded
250 * if necessary. Updates to the array while recording new workers
251 * and unrecording terminated ones are protected from each other
252 * by a lock but the array is otherwise concurrently readable, and
253 * accessed directly. To simplify index-based operations, the
254 * array size is always a power of two, and all readers must
255 * tolerate null slots. Worker queues are at odd indices. Shared
256 * (submission) queues are at even indices, up to a maximum of 64
257 * slots, to limit growth even if array needs to expand to add
258 * more workers. Grouping them together in this way simplifies and
259 * speeds up task scanning.
260 *
261 * All worker thread creation is on-demand, triggered by task
262 * submissions, replacement of terminated workers, and/or
263 * compensation for blocked workers. However, all other support
264 * code is set up to work with other policies. To ensure that we
265 * do not hold on to worker references that would prevent GC, ALL
266 * accesses to workQueues are via indices into the workQueues
267 * array (which is one source of some of the messy code
268 * constructions here). In essence, the workQueues array serves as
269 * a weak reference mechanism. Thus for example the wait queue
270 * field of ctl stores indices, not references. Access to the
271 * workQueues in associated methods (for example signalWork) must
272 * both index-check and null-check the IDs. All such accesses
273 * ignore bad IDs by returning out early from what they are doing,
274 * since this can only be associated with termination, in which
275 * case it is OK to give up. All uses of the workQueues array
276 * also check that it is non-null (even if previously
277 * non-null). This allows nulling during termination, which is
278 * currently not necessary, but remains an option for
279 * resource-revocation-based shutdown schemes. It also helps
280 * reduce JIT issuance of uncommon-trap code, which tends to
281 * unnecessarily complicate control flow in some methods.
282 *
283 * Event Queuing. Unlike HPC work-stealing frameworks, we cannot
284 * let workers spin indefinitely scanning for tasks when none can
285 * be found immediately, and we cannot start/resume workers unless
286 * there appear to be tasks available. On the other hand, we must
287 * quickly prod them into action when new tasks are submitted or
288 * generated. In many usages, ramp-up time to activate workers is
289 * the main limiting factor in overall performance (this is
290 * compounded at program start-up by JIT compilation and
291 * allocation). So we try to streamline this as much as possible.
292 * We park/unpark workers after placing in an event wait queue
293 * when they cannot find work. This "queue" is actually a simple
294 * Treiber stack, headed by the "id" field of ctl, plus a 15bit
295 * counter value (that reflects the number of times a worker has
296 * been inactivated) to avoid ABA effects (we need only as many
297 * version numbers as worker threads). Successors are held in
298 * field WorkQueue.nextWait. Queuing deals with several intrinsic
299 * races, mainly that a task-producing thread can miss seeing (and
300 * signalling) another thread that gave up looking for work but
301 * has not yet entered the wait queue. We solve this by requiring
302 * a full sweep of all workers (via repeated calls to method
303 * scan()) both before and after a newly waiting worker is added
304 * to the wait queue. During a rescan, the worker might release
305 * some other queued worker rather than itself, which has the same
306 * net effect. Because enqueued workers may actually be rescanning
307 * rather than waiting, we set and clear the "parker" field of
308 * WorkQueues to reduce unnecessary calls to unpark. (This
309 * requires a secondary recheck to avoid missed signals.) Note
310 * the unusual conventions about Thread.interrupts surrounding
311 * parking and other blocking: Because interrupts are used solely
312 * to alert threads to check termination, which is checked anyway
313 * upon blocking, we clear status (using Thread.interrupted)
314 * before any call to park, so that park does not immediately
315 * return due to status being set via some other unrelated call to
316 * interrupt in user code.
317 *
318 * Signalling. We create or wake up workers only when there
319 * appears to be at least one task they might be able to find and
320 * execute. However, many other threads may notice the same task
321 * and each signal to wake up a thread that might take it. So in
322 * general, pools will be over-signalled. When a submission is
323 * added or another worker adds a task to a queue that is
324 * apparently empty, they signal waiting workers (or trigger
325 * creation of new ones if fewer than the given parallelism
326 * level). These primary signals are buttressed by signals
327 * whenever other threads scan for work or do not have a task to
328 * process (including the case of leaving a hint to unparked
329 * threads to help signal others upon wakeup). On most platforms,
330 * signalling (unpark) overhead time is noticeably long, and the
331 * time between signalling a thread and it actually making
332 * progress can be very noticeably long, so it is worth offloading
333 * these delays from critical paths as much as possible.
334 *
335 * Trimming workers. To release resources after periods of lack of
336 * use, a worker starting to wait when the pool is quiescent will
337 * time out and terminate if the pool has remained quiescent for a
338 * given period -- a short period if there are more threads than
339 * parallelism, longer as the number of threads decreases. This
340 * will slowly propagate, eventually terminating all workers after
341 * periods of non-use.
342 *
343 * Shutdown and Termination. A call to shutdownNow atomically sets
344 * a plock bit and then (non-atomically) sets each worker's
345 * qlock status, cancels all unprocessed tasks, and wakes up
346 * all waiting workers. Detecting whether termination should
347 * commence after a non-abrupt shutdown() call requires more work
348 * and bookkeeping. We need consensus about quiescence (i.e., that
349 * there is no more work). The active count provides a primary
350 * indication but non-abrupt shutdown still requires a rechecking
351 * scan for any workers that are inactive but not queued.
352 *
353 * Joining Tasks
354 * =============
355 *
356 * Any of several actions may be taken when one worker is waiting
357 * to join a task stolen (or always held) by another. Because we
358 * are multiplexing many tasks on to a pool of workers, we can't
359 * just let them block (as in Thread.join). We also cannot just
360 * reassign the joiner's run-time stack with another and replace
361 * it later, which would be a form of "continuation", that even if
362 * possible is not necessarily a good idea since we sometimes need
363 * both an unblocked task and its continuation to progress.
364 * Instead we combine two tactics:
365 *
366 * Helping: Arranging for the joiner to execute some task that it
367 * would be running if the steal had not occurred.
368 *
369 * Compensating: Unless there are already enough live threads,
370 * method tryCompensate() may create or re-activate a spare
371 * thread to compensate for blocked joiners until they unblock.
372 *
373 * A third form (implemented in tryRemoveAndExec) amounts to
374 * helping a hypothetical compensator: If we can readily tell that
375 * a possible action of a compensator is to steal and execute the
376 * task being joined, the joining thread can do so directly,
377 * without the need for a compensation thread (although at the
378 * expense of larger run-time stacks, but the tradeoff is
379 * typically worthwhile).
380 *
381 * The ManagedBlocker extension API can't use helping so relies
382 * only on compensation in method awaitBlocker.
383 *
384 * The algorithm in tryHelpStealer entails a form of "linear"
385 * helping: Each worker records (in field currentSteal) the most
386 * recent task it stole from some other worker. Plus, it records
387 * (in field currentJoin) the task it is currently actively
388 * joining. Method tryHelpStealer uses these markers to try to
389 * find a worker to help (i.e., steal back a task from and execute
390 * it) that could hasten completion of the actively joined task.
391 * In essence, the joiner executes a task that would be on its own
392 * local deque had the to-be-joined task not been stolen. This may
393 * be seen as a conservative variant of the approach in Wagner &
394 * Calder "Leapfrogging: a portable technique for implementing
395 * efficient futures" SIGPLAN Notices, 1993
396 * (http://portal.acm.org/citation.cfm?id=155354). It differs in
397 * that: (1) We only maintain dependency links across workers upon
398 * steals, rather than use per-task bookkeeping. This sometimes
399 * requires a linear scan of workQueues array to locate stealers,
400 * but often doesn't because stealers leave hints (that may become
401 * stale/wrong) of where to locate them. It is only a hint
402 * because a worker might have had multiple steals and the hint
403 * records only one of them (usually the most current). Hinting
404 * isolates cost to when it is needed, rather than adding to
405 * per-task overhead. (2) It is "shallow", ignoring nesting and
406 * potentially cyclic mutual steals. (3) It is intentionally
407 * racy: field currentJoin is updated only while actively joining,
408 * which means that we miss links in the chain during long-lived
409 * tasks, GC stalls etc (which is OK since blocking in such cases
410 * is usually a good idea). (4) We bound the number of attempts
411 * to find work (see MAX_HELP) and fall back to suspending the
412 * worker and if necessary replacing it with another.
413 *
414 * Helping actions for CountedCompleters are much simpler: Method
415 * helpComplete can take and execute any task with the same root
416 * as the task being waited on. However, this still entails some
417 * traversal of completer chains, so is less efficient than using
418 * CountedCompleters without explicit joins.
419 *
420 * It is impossible to keep exactly the target parallelism number
421 * of threads running at any given time. Determining the
422 * existence of conservatively safe helping targets, the
423 * availability of already-created spares, and the apparent need
424 * to create new spares are all racy, so we rely on multiple
425 * retries of each. Compensation in the apparent absence of
426 * helping opportunities is challenging to control on JVMs, where
427 * GC and other activities can stall progress of tasks that in
428 * turn stall out many other dependent tasks, without us being
429 * able to determine whether they will ever require compensation.
430 * Even though work-stealing otherwise encounters little
431 * degradation in the presence of more threads than cores,
432 * aggressively adding new threads in such cases entails risk of
433 * unwanted positive feedback control loops in which more threads
434 * cause more dependent stalls (as well as delayed progress of
435 * unblocked threads to the point that we know they are available)
436 * leading to more situations requiring more threads, and so
437 * on. This aspect of control can be seen as an (analytically
438 * intractable) game with an opponent that may choose the worst
439 * (for us) active thread to stall at any time. We take several
440 * precautions to bound losses (and thus bound gains), mainly in
441 * methods tryCompensate and awaitJoin.
442 *
443 * Common Pool
444 * ===========
445 *
446 * The static commonPool always exists after static
447 * initialization. Since it (or any other created pool) need
448 * never be used, we minimize initial construction overhead and
449 * footprint to the setup of about a dozen fields, with no nested
450 * allocation. Most bootstrapping occurs within method
451 * fullExternalPush during the first submission to the pool.
452 *
453 * When external threads submit to the common pool, they can
454 * perform some subtask processing (see externalHelpJoin and
455 * related methods). We do not need to record whether these
456 * submissions are to the common pool -- if not, externalHelpJoin
457 * returns quickly (at the most helping to signal some common pool
458 * workers). These submitters would otherwise be blocked waiting
459 * for completion, so the extra effort (with liberally sprinkled
460 * task status checks) in inapplicable cases amounts to an odd
461 * form of limited spin-wait before blocking in ForkJoinTask.join.
462 *
463 * Style notes
464 * ===========
465 *
466 * There is a lot of representation-level coupling among classes
467 * ForkJoinPool, ForkJoinWorkerThread, and ForkJoinTask. The
468 * fields of WorkQueue maintain data structures managed by
469 * ForkJoinPool, so are directly accessed. There is little point
470 * trying to reduce this, since any associated future changes in
471 * representations will need to be accompanied by algorithmic
472 * changes anyway. Several methods intrinsically sprawl because
473 * they must accumulate sets of consistent reads of volatiles held
474 * in local variables. Methods signalWork() and scan() are the
475 * main bottlenecks, so are especially heavily
476 * micro-optimized/mangled. There are lots of inline assignments
477 * (of form "while ((local = field) != 0)") which are usually the
478 * simplest way to ensure the required read orderings (which are
479 * sometimes critical). This leads to a "C"-like style of listing
480 * declarations of these locals at the heads of methods or blocks.
481 * There are several occurrences of the unusual "do {} while
482 * (!cas...)" which is the simplest way to force an update of a
483 * CAS'ed variable. There are also other coding oddities (including
484 * several unnecessary-looking hoisted null checks) that help
485 * some methods perform reasonably even when interpreted (not
486 * compiled).
487 *
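 * As an editorial illustration (not part of the original comment), the
 * two idioms mentioned above look like:
 *
 *   WorkQueue[] ws; WorkQueue w;              // inline assignments in a guard
 *   if ((ws = workQueues) != null && (w = ws[i]) != null) { ... }
 *
 *   do {} while (!U.compareAndSwapInt(this, INDEXSEED,   // forced CAS update
 *                                     s = indexSeed, s += SEED_INCREMENT));
 *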
488 * The order of declarations in this file is:
489 * (1) Static utility functions
490 * (2) Nested (static) classes
491 * (3) Static fields
492 * (4) Fields, along with constants used when unpacking some of them
493 * (5) Internal control methods
494 * (6) Callbacks and other support for ForkJoinTask methods
495 * (7) Exported methods
496 * (8) Static block initializing statics in minimally dependent order
497 */
498
499 // Static utilities
500
501 /**
502 * If there is a security manager, makes sure caller has
503 * permission to modify threads.
504 */
505 private static void checkPermission() {
506 SecurityManager security = System.getSecurityManager();
507 if (security != null)
508 security.checkPermission(modifyThreadPermission);
509 }
510
511 // Nested classes
512
513 /**
514 * Factory for creating new {@link ForkJoinWorkerThread}s.
515 * A {@code ForkJoinWorkerThreadFactory} must be defined and used
516 * for {@code ForkJoinWorkerThread} subclasses that extend base
517 * functionality or initialize threads with different contexts.
518 */
519 public static interface ForkJoinWorkerThreadFactory {
520 /**
521 * Returns a new worker thread operating in the given pool.
522 *
523 * @param pool the pool this thread works in
524 * @throws NullPointerException if the pool is null
525 */
526 public ForkJoinWorkerThread newThread(ForkJoinPool pool);
527 }
528
529 /**
530 * Default ForkJoinWorkerThreadFactory implementation; creates a
531 * new ForkJoinWorkerThread.
532 */
533 static final class DefaultForkJoinWorkerThreadFactory
534 implements ForkJoinWorkerThreadFactory {
535 public final ForkJoinWorkerThread newThread(ForkJoinPool pool) {
536 return new ForkJoinWorkerThread(pool);
537 }
538 }
539
540 /**
541 * Class for artificial tasks that are used to replace the target
542 * of local joins if they are removed from an interior queue slot
543 * in WorkQueue.tryRemoveAndExec. We don't need the proxy to
544 * actually do anything beyond having a unique identity.
545 */
546 static final class EmptyTask extends ForkJoinTask<Void> {
547 private static final long serialVersionUID = -7721805057305804111L;
548 EmptyTask() { status = ForkJoinTask.NORMAL; } // force done
549 public final Void getRawResult() { return null; }
550 public final void setRawResult(Void x) {}
551 public final boolean exec() { return true; }
552 }
553
554 /**
555 * Queues supporting work-stealing as well as external task
556 * submission. See above for main rationale and algorithms.
557 * Implementation relies heavily on "Unsafe" intrinsics
558 * and selective use of "volatile":
559 *
560 * Field "base" is the index (mod array.length) of the least valid
561 * queue slot, which is always the next position to steal (poll)
562 * from if nonempty. Reads and writes require volatile orderings
563 * but not CAS, because updates are only performed after slot
564 * CASes.
565 *
566 * Field "top" is the index (mod array.length) of the next queue
567 * slot to push to or pop from. It is written only by owner thread
568 * for push, or under lock for external/shared push, and accessed
569 * by other threads only after reading (volatile) base. Both top
570 * and base are allowed to wrap around on overflow, but (top -
571 * base) (or more commonly -(base - top) to force volatile read of
572 * base before top) still estimates size. The lock ("qlock") is
573 * forced to -1 on termination, causing all further lock attempts
574 * to fail. (Note: we don't need CAS for termination state because
575 * upon pool shutdown, all shared-queues will stop being used
576 * anyway.) Nearly all lock bodies are set up so that exceptions
577 * within lock bodies are "impossible" (modulo JVM errors that
578 * would cause failure anyway.)
579 *
580 * The array slots are read and written using the emulation of
581 * volatiles/atomics provided by Unsafe. Insertions must in
582 * general use putOrderedObject as a form of releasing store to
583 * ensure that all writes to the task object are ordered before
584 * its publication in the queue. All removals entail a CAS to
585 * null. The array is always a power of two. To ensure safety of
586 * Unsafe array operations, all accesses perform explicit null
587 * checks and implicit bounds checks via power-of-two masking.
588 *
589 * In addition to basic queuing support, this class contains
590 * fields described elsewhere to control execution. It turns out
591 * to work better memory-layout-wise to include them in this class
592 * rather than a separate class.
593 *
594 * Performance on most platforms is very sensitive to placement of
595 * instances of both WorkQueues and their arrays -- we absolutely
596 * do not want multiple WorkQueue instances or multiple queue
597 * arrays sharing cache lines. (It would be best for queue objects
598 * and their arrays to share, but there is nothing available to
599 * help arrange that). Unfortunately, because they are recorded
600 * in a common array, WorkQueue instances are often moved to be
601 * adjacent by garbage collectors. To reduce impact, we use field
602 * padding that works OK on common platforms; this effectively
603 * trades off slightly slower average field access for the sake of
604 * avoiding really bad worst-case access. (Until better JVM
605 * support is in place, this padding is dependent on transient
606 * properties of JVM field layout rules.)
607 */
608 static final class WorkQueue {
609 /**
610 * Capacity of work-stealing queue array upon initialization.
611 * Must be a power of two; at least 4, but should be larger to
612 * reduce or eliminate cacheline sharing among queues.
613 * Currently, it is much larger, as a partial workaround for
614 * the fact that JVMs often place arrays in locations that
615 * share GC bookkeeping (especially cardmarks) such that
616 * per-write accesses encounter serious memory contention.
617 */
618 static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
619
620 /**
621 * Maximum size for queue arrays. Must be a power of two less
622 * than or equal to 1 << (31 - width of array entry) to ensure
623 * lack of wraparound of index calculations, but defined to a
624 * value a bit less than this to help users trap runaway
625 * programs before saturating systems.
626 */
627 static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M
628
629 int seed; // for random scanning; initialize nonzero
630 volatile int eventCount; // encoded inactivation count; < 0 if inactive
631 int nextWait; // encoded record of next event waiter
632 int hint; // steal or signal hint (index)
633 int poolIndex; // index of this queue in pool (or 0)
634 final int mode; // 0: lifo, > 0: fifo, < 0: shared
635 int nsteals; // number of steals
636 volatile int qlock; // 1: locked, -1: terminate; else 0
637 volatile int base; // index of next slot for poll
638 int top; // index of next slot for push
639 ForkJoinTask<?>[] array; // the elements (initially unallocated)
640 final ForkJoinPool pool; // the containing pool (may be null)
641 final ForkJoinWorkerThread owner; // owning thread or null if shared
642 volatile Thread parker; // == owner during call to park; else null
643 volatile ForkJoinTask<?> currentJoin; // task being joined in awaitJoin
644 ForkJoinTask<?> currentSteal; // current non-local task being executed
645
646 // Heuristic padding to ameliorate unfortunate memory placements
647 Object p00, p01, p02, p03, p04, p05, p06, p07;
648 Object p08, p09, p0a, p0b, p0c;
649
650 WorkQueue(ForkJoinPool pool, ForkJoinWorkerThread owner, int mode,
651 int seed) {
652 this.array = new ForkJoinTask<?>[WorkQueue.INITIAL_QUEUE_CAPACITY];
653 this.pool = pool;
654 this.owner = owner;
655 this.mode = mode;
656 this.seed = seed;
657 // Place indices in the center of array
658 base = top = INITIAL_QUEUE_CAPACITY >>> 1;
659 }
660
661 /**
662 * Pushes a task. Call only by owner in unshared queues.
663 * Cases needing resizing or rejection are relayed to fullPush
664 * (that also handles shared queues).
665 *
666 * @param task the task. Caller must ensure non-null.
667 * @throws RejectedExecutionException if array cannot be resized
668 */
669 final void push(ForkJoinTask<?> task) {
670 ForkJoinTask<?>[] a; ForkJoinPool p;
671 int s = top, m, n;
672 if ((a = array) != null) { // ignore if queue removed
673 U.putOrderedObject
674 (a, (((m = a.length - 1) & s) << ASHIFT) + ABASE, task);
675 if ((n = (top = s + 1) - base) <= 1) {
676 if ((p = pool) != null)
677 p.signalWork(this, 0);
678 }
679 else if (n >= m)
680 growArray();
681 }
682 }
683
684 /**
685 * Pushes a task if lock is free and array is either big
686 * enough or can be resized to be big enough.
687 *
688 * @param task the task. Caller must ensure non-null.
689 * @return true if submitted
690 */
691 final boolean trySharedPush(ForkJoinTask<?> task) {
692 boolean submitted = false;
693 if (qlock == 0 && U.compareAndSwapInt(this, QLOCK, 0, 1)) {
694 ForkJoinTask<?>[] a = array; ForkJoinPool p;
695 int s = top;
696 try {
697 if ((a != null && a.length > s + 1 - base) ||
698 (a = growArray()) != null) { // must presize
699 int j = (((a.length - 1) & s) << ASHIFT) + ABASE;
700 U.putOrderedObject(a, j, task);
701 top = s + 1;
702 submitted = true;
703 }
704 } finally {
705 qlock = 0; // unlock
706 }
707 if (submitted && (p = pool) != null)
708 p.signalWork(this, 0);
709 }
710 return submitted;
711 }
712
713 /**
714 * Initializes or doubles the capacity of array. Call either
715 * by owner or with lock held -- it is OK for base, but not
716 * top, to move while resizings are in progress.
717 */
718 final ForkJoinTask<?>[] growArray() {
719 ForkJoinTask<?>[] oldA = array;
720 int size = oldA != null ? oldA.length << 1 : INITIAL_QUEUE_CAPACITY;
721 if (size > MAXIMUM_QUEUE_CAPACITY)
722 throw new RejectedExecutionException("Queue capacity exceeded");
723 int oldMask, t, b;
724 ForkJoinTask<?>[] a = array = new ForkJoinTask<?>[size];
725 if (oldA != null && (oldMask = oldA.length - 1) >= 0 &&
726 (t = top) - (b = base) > 0) {
727 int mask = size - 1;
728 do {
729 ForkJoinTask<?> x;
730 int oldj = ((b & oldMask) << ASHIFT) + ABASE;
731 int j = ((b & mask) << ASHIFT) + ABASE;
732 x = (ForkJoinTask<?>)U.getObjectVolatile(oldA, oldj);
733 if (x != null &&
734 U.compareAndSwapObject(oldA, oldj, x, null))
735 U.putObjectVolatile(a, j, x);
736 } while (++b != t);
737 }
738 return a;
739 }
740
741 /**
742 * Takes next task, if one exists, in LIFO order. Call only
743 * by owner in unshared queues.
744 */
745 final ForkJoinTask<?> pop() {
746 ForkJoinTask<?>[] a; ForkJoinTask<?> t; int m;
747 if ((a = array) != null && (m = a.length - 1) >= 0) {
748 for (int s; (s = top - 1) - base >= 0;) {
749 long j = ((m & s) << ASHIFT) + ABASE;
750 if ((t = (ForkJoinTask<?>)U.getObject(a, j)) == null)
751 break;
752 if (U.compareAndSwapObject(a, j, t, null)) {
753 top = s;
754 return t;
755 }
756 }
757 }
758 return null;
759 }
760
761 /**
762 * Takes a task in FIFO order if b is base of queue and a task
763 * can be claimed without contention. Specialized versions
764 * appear in ForkJoinPool methods scan and tryHelpStealer.
765 */
766 final ForkJoinTask<?> pollAt(int b) {
767 ForkJoinTask<?> t; ForkJoinTask<?>[] a;
768 if ((a = array) != null) {
769 int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
770 if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) != null &&
771 base == b &&
772 U.compareAndSwapObject(a, j, t, null)) {
773 base = b + 1;
774 return t;
775 }
776 }
777 return null;
778 }
779
780 /**
781 * Takes next task, if one exists, in FIFO order.
782 */
783 final ForkJoinTask<?> poll() {
784 ForkJoinTask<?>[] a; int b; ForkJoinTask<?> t;
785 while ((b = base) - top < 0 && (a = array) != null) {
786 int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
787 t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
788 if (t != null) {
789 if (base == b &&
790 U.compareAndSwapObject(a, j, t, null)) {
791 base = b + 1;
792 return t;
793 }
794 }
795 else if (base == b) {
796 if (b + 1 == top)
797 break;
798 Thread.yield(); // wait for lagging update (very rare)
799 }
800 }
801 return null;
802 }
803
804 /**
805 * Takes next task, if one exists, in order specified by mode.
806 */
807 final ForkJoinTask<?> nextLocalTask() {
808 return mode == 0 ? pop() : poll();
809 }
810
811 /**
812 * Returns next task, if one exists, in order specified by mode.
813 */
814 final ForkJoinTask<?> peek() {
815 ForkJoinTask<?>[] a = array; int m;
816 if (a == null || (m = a.length - 1) < 0)
817 return null;
818 int i = mode == 0 ? top - 1 : base;
819 int j = ((i & m) << ASHIFT) + ABASE;
820 return (ForkJoinTask<?>)U.getObjectVolatile(a, j);
821 }
822
823 /**
824 * Pops the given task only if it is at the current top.
825 * (A shared version is available only via FJP.tryExternalUnpush)
826 */
827 final boolean tryUnpush(ForkJoinTask<?> t) {
828 ForkJoinTask<?>[] a; int s;
829 if ((a = array) != null && (s = top) != base &&
830 U.compareAndSwapObject
831 (a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) {
832 top = s;
833 return true;
834 }
835 return false;
836 }
837
838 /**
839 * Removes and cancels all known tasks, ignoring any exceptions.
840 */
841 final void cancelAll() {
842 ForkJoinTask.cancelIgnoringExceptions(currentJoin);
843 ForkJoinTask.cancelIgnoringExceptions(currentSteal);
844 for (ForkJoinTask<?> t; (t = poll()) != null; )
845 ForkJoinTask.cancelIgnoringExceptions(t);
846 }
847
848 /**
849 * Computes next value for random probes. Scans don't require
850 * a very high quality generator, but also not a crummy one.
851 * Marsaglia xor-shift is cheap and works well enough. Note:
852 * This is manually inlined in its usages in ForkJoinPool to
853 * avoid writes inside busy scan loops.
854 */
855 final int nextSeed() {
856 int r = seed;
857 r ^= r << 13;
858 r ^= r >>> 17;
859 return seed = r ^= r << 5;
860 }
861
862 /**
863 * Provides a more accurate estimate of size than (top - base)
864 * by ordering reads and checking whether a near-empty queue
865 * has at least one unclaimed task.
866 */
867 final int queueSize() {
868 ForkJoinTask<?>[] a; int k, s, n;
869 return ((n = base - (s = top)) < 0 &&
870 (n != -1 ||
871 ((a = array) != null && (k = a.length) > 0 &&
872 U.getObject
873 (a, (long)((((k - 1) & (s - 1)) << ASHIFT) + ABASE)) != null))) ?
874 -n : 0;
875 }
876
877 // Specialized execution methods
878
879 /**
880 * Pops and runs tasks until empty.
881 */
882 private void popAndExecAll() {
883 // A bit faster than repeated pop calls
884 ForkJoinTask<?>[] a; int m, s; long j; ForkJoinTask<?> t;
885 while ((a = array) != null && (m = a.length - 1) >= 0 &&
886 (s = top - 1) - base >= 0 &&
887 (t = ((ForkJoinTask<?>)
888 U.getObject(a, j = ((m & s) << ASHIFT) + ABASE)))
889 != null) {
890 if (U.compareAndSwapObject(a, j, t, null)) {
891 top = s;
892 t.doExec();
893 }
894 }
895 }
896
897 /**
898 * Polls and runs tasks until empty.
899 */
900 private void pollAndExecAll() {
901 for (ForkJoinTask<?> t; (t = poll()) != null;)
902 t.doExec();
903 }
904
905 /**
906 * If present, removes from queue and executes the given task,
907 * or any other cancelled task. Returns (true) on any CAS
908 * or consistency check failure so caller can retry.
909 *
910 * @return false if no progress can be made, else true
911 */
912 final boolean tryRemoveAndExec(ForkJoinTask<?> task) {
913 boolean stat = true, removed = false, empty = true;
914 ForkJoinTask<?>[] a; int m, s, b, n;
915 if ((a = array) != null && (m = a.length - 1) >= 0 &&
916 (n = (s = top) - (b = base)) > 0) {
917 for (ForkJoinTask<?> t;;) { // traverse from s to b
918 int j = ((--s & m) << ASHIFT) + ABASE;
919 t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
920 if (t == null) // inconsistent length
921 break;
922 else if (t == task) {
923 if (s + 1 == top) { // pop
924 if (!U.compareAndSwapObject(a, j, task, null))
925 break;
926 top = s;
927 removed = true;
928 }
929 else if (base == b) // replace with proxy
930 removed = U.compareAndSwapObject(a, j, task,
931 new EmptyTask());
932 break;
933 }
934 else if (t.status >= 0)
935 empty = false;
936 else if (s + 1 == top) { // pop and throw away
937 if (U.compareAndSwapObject(a, j, t, null))
938 top = s;
939 break;
940 }
941 if (--n == 0) {
942 if (!empty && base == b)
943 stat = false;
944 break;
945 }
946 }
947 }
948 if (removed)
949 task.doExec();
950 return stat;
951 }
952
953 /**
954 * Polls for and executes the given task or any other task in
955 * its CountedCompleter computation
956 */
957 final boolean pollAndExecCC(ForkJoinTask<?> root) {
958 ForkJoinTask<?>[] a; int b; Object o;
959 outer: while ((b = base) - top < 0 && (a = array) != null) {
960 long j = (((a.length - 1) & b) << ASHIFT) + ABASE;
961 if ((o = U.getObject(a, j)) == null ||
962 !(o instanceof CountedCompleter))
963 break;
964 for (CountedCompleter<?> t = (CountedCompleter<?>)o, r = t;;) {
965 if (r == root) {
966 if (base == b &&
967 U.compareAndSwapObject(a, j, t, null)) {
968 base = b + 1;
969 t.doExec();
970 return true;
971 }
972 else
973 break; // restart
974 }
975 if ((r = r.completer) == null)
976 break outer; // not part of root computation
977 }
978 }
979 return false;
980 }
981
982 /**
983 * Executes a top-level task and any local tasks remaining
984 * after execution.
985 */
986 final void runTask(ForkJoinTask<?> t) {
987 if (t != null) {
988 (currentSteal = t).doExec();
989 currentSteal = null;
990 ++nsteals;
991 if (top != base) { // process remaining local tasks
992 if (mode == 0)
993 popAndExecAll();
994 else
995 pollAndExecAll();
996 }
997 }
998 }
999
1000 /**
1001 * Executes a non-top-level (stolen) task.
1002 */
1003 final void runSubtask(ForkJoinTask<?> t) {
1004 if (t != null) {
1005 ForkJoinTask<?> ps = currentSteal;
1006 (currentSteal = t).doExec();
1007 currentSteal = ps;
1008 }
1009 }
1010
1011 /**
1012 * Returns true if owned and not known to be blocked.
1013 */
1014 final boolean isApparentlyUnblocked() {
1015 Thread wt; Thread.State s;
1016 return (eventCount >= 0 &&
1017 (wt = owner) != null &&
1018 (s = wt.getState()) != Thread.State.BLOCKED &&
1019 s != Thread.State.WAITING &&
1020 s != Thread.State.TIMED_WAITING);
1021 }
1022
1023 /**
1024 * If this queue is owned and its owner is not already interrupted,
1025 * tries to interrupt and/or unpark it, ignoring exceptions.
1026 */
1027 final void interruptOwner() {
1028 Thread wt, p;
1029 if ((wt = owner) != null && !wt.isInterrupted()) {
1030 try {
1031 wt.interrupt();
1032 } catch (SecurityException ignore) {
1033 }
1034 }
1035 if ((p = parker) != null)
1036 U.unpark(p);
1037 }
1038
1039 // Unsafe mechanics
1040 private static final sun.misc.Unsafe U;
1041 private static final long QLOCK;
1042 private static final int ABASE;
1043 private static final int ASHIFT;
1044 static {
1045 int s;
1046 try {
1047 U = getUnsafe();
1048 Class<?> k = WorkQueue.class;
1049 Class<?> ak = ForkJoinTask[].class;
1050 QLOCK = U.objectFieldOffset
1051 (k.getDeclaredField("qlock"));
1052 ABASE = U.arrayBaseOffset(ak);
1053 s = U.arrayIndexScale(ak);
1054 } catch (Exception e) {
1055 throw new Error(e);
1056 }
1057 if ((s & (s-1)) != 0)
1058 throw new Error("data type scale not a power of two");
1059 ASHIFT = 31 - Integer.numberOfLeadingZeros(s);
1060 }
1061 }
1062
1063 // static fields (initialized in static initializer below)
1064
1065 /**
1066 * Creates a new ForkJoinWorkerThread. This factory is used unless
1067 * overridden in ForkJoinPool constructors.
1068 */
1069 public static final ForkJoinWorkerThreadFactory
1070 defaultForkJoinWorkerThreadFactory;
1071
1072 /**
1073 * Per-thread records for threads that submit to pools. Currently
1074 * holds only pseudo-random seed / index that is used to choose
1075 * submission queues in method externalPush. In the future, this may
1076 * also incorporate a means to implement different task rejection
1077 * and resubmission policies.
1078 *
1079 * Seeds for submitters and workers/workQueues work in basically
1080 * the same way but are initialized and updated using slightly
1081 * different mechanics. Both are initialized using the same
1082 * approach as in class ThreadLocal, where successive values are
1083 * unlikely to collide with previous values. Seeds are then
1084 * randomly modified upon collisions using xorshifts, which
1085 * requires a non-zero seed.
1086 */
1087 static final class Submitter {
1088 int seed;
1089 Submitter(int s) { seed = s; }
1090 }
1091
1092 /**
1093 * Per-thread submission bookkeeping. Shared across all pools
1094 * to reduce ThreadLocal pollution and because random motion
1095 * to avoid contention in one pool is likely to hold for others.
1096 * Lazily initialized on first submission (but null-checked
1097 * in other contexts to avoid unnecessary initialization).
1098 */
1099 static final ThreadLocal<Submitter> submitters;
1100
1101 /**
1102 * Common (static) pool. Non-null for public use unless a static
1103 * construction exception, but internal usages null-check on use
1104 * to paranoically avoid potential initialization circularities
1105 * as well as to simplify generated code.
1106 */
1107 static final ForkJoinPool commonPool;
1108
1109 /**
1110 * Permission required for callers of methods that may start or
1111 * kill threads.
1112 */
1113 private static final RuntimePermission modifyThreadPermission;
1114
1115 /**
1116 * Common pool parallelism. Must equal commonPool.parallelism.
1117 */
1118 static final int commonPoolParallelism;
1119
1120 /**
1121 * Sequence number for creating workerNamePrefix.
1122 */
1123 private static int poolNumberSequence;
1124
1125 /**
1126 * Returns the next sequence number. We don't expect this to
1127 * ever contend so use simple builtin sync.
1128 */
1129 private static final synchronized int nextPoolId() {
1130 return ++poolNumberSequence;
1131 }
1132
1133 // static constants
1134
1135 /**
1136 * Initial timeout value (in nanoseconds) for the thread
1137 * triggering quiescence to park waiting for new work. On timeout,
1138 * the thread will instead try to shrink the number of
1139 * workers. The value should be large enough to avoid overly
1140 * aggressive shrinkage during most transient stalls (long GCs
1141 * etc).
1142 */
1143 private static final long IDLE_TIMEOUT = 2000L * 1000L * 1000L; // 2sec
1144
1145 /**
1146 * Timeout value when there are more threads than parallelism level
1147 */
1148 private static final long FAST_IDLE_TIMEOUT = 200L * 1000L * 1000L;
1149
1150 /**
1151 * The maximum stolen->joining link depth allowed in method
1152 * tryHelpStealer. Must be a power of two. Depths for legitimate
1153 * chains are unbounded, but we use a fixed constant to avoid
1154 * (otherwise unchecked) cycles and to bound staleness of
1155 * traversal parameters at the expense of sometimes blocking when
1156 * we could be helping.
1157 */
1158 private static final int MAX_HELP = 64;
1159
1160 /**
1161 * Increment for seed generators. See class ThreadLocal for
1162 * explanation.
1163 */
1164 private static final int SEED_INCREMENT = 0x61c88647;
1165
1166 /**
1167 * Bits and masks for control variables
1168 *
1169 * Field ctl is a long packed with:
1170 * AC: Number of active running workers minus target parallelism (16 bits)
1171 * TC: Number of total workers minus target parallelism (16 bits)
1172 * ST: true if pool is terminating (1 bit)
1173 * EC: the wait count of top waiting thread (15 bits)
1174 * ID: poolIndex of top of Treiber stack of waiters (16 bits)
1175 *
1176 * When convenient, we can extract the upper 32 bits of counts and
1177 * the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e =
1178 * (int)ctl. The ec field is never accessed alone, but always
1179 * together with id and st. The offsets of counts by the target
1180 * parallelism and the positionings of fields makes it possible to
1181 * perform the most common checks via sign tests of fields: When
1182 * ac is negative, there are not enough active workers, when tc is
1183 * negative, there are not enough total workers, and when e is
1184 * negative, the pool is terminating. To deal with these possibly
1185 * negative fields, we use casts in and out of "short" and/or
1186 * signed shifts to maintain signedness.
1187 *
1188 * When a thread is queued (inactivated), its eventCount field is
1189 * set negative, which is the only way to tell if a worker is
1190 * prevented from executing tasks, even though it must continue to
1191 * scan for them to avoid queuing races. Note however that
1192 * eventCount updates lag releases so usage requires care.
1193 *
1194 * Field plock is an int packed with:
1195 * SHUTDOWN: true if shutdown is enabled (1 bit)
1196 * SEQ: a sequence lock, with PL_LOCK bit set if locked (30 bits)
1197 * SIGNAL: set when threads may be waiting on the lock (1 bit)
1198 *
1199 * The sequence number enables simple consistency checks:
1200 * Staleness of read-only operations on the workQueues array can
1201 * be checked by comparing plock before vs after the reads.
1202 */
1203
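    // Editorial illustration (not part of the original source): with the
    // packing described above, the common sign tests reduce to
    //   int u = (int)(ctl >>> 32);    // upper half: AC (high 16 bits), TC (low 16 bits)
    //   int e = (int)ctl;             // lower half: ST, EC, ID
    //   boolean tooFewActive = u < 0;         // AC field negative
    //   boolean tooFewTotal  = (short)u < 0;  // TC field negative
    //   boolean terminating  = e < 0;         // STOP_BIT set
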
1204 // bit positions/shifts for fields
1205 private static final int AC_SHIFT = 48;
1206 private static final int TC_SHIFT = 32;
1207 private static final int ST_SHIFT = 31;
1208 private static final int EC_SHIFT = 16;
1209
1210 // bounds
1211 private static final int SMASK = 0xffff; // short bits
1212 private static final int MAX_CAP = 0x7fff; // max #workers - 1
1213 private static final int EVENMASK = 0xfffe; // even short bits
1214 private static final int SQMASK = 0x007e; // max 64 (even) slots
1215 private static final int SHORT_SIGN = 1 << 15;
1216 private static final int INT_SIGN = 1 << 31;
1217
1218 // masks
1219 private static final long STOP_BIT = 0x0001L << ST_SHIFT;
1220 private static final long AC_MASK = ((long)SMASK) << AC_SHIFT;
1221 private static final long TC_MASK = ((long)SMASK) << TC_SHIFT;
1222
1223 // units for incrementing and decrementing
1224 private static final long TC_UNIT = 1L << TC_SHIFT;
1225 private static final long AC_UNIT = 1L << AC_SHIFT;
1226
1227 // masks and units for dealing with u = (int)(ctl >>> 32)
1228 private static final int UAC_SHIFT = AC_SHIFT - 32;
1229 private static final int UTC_SHIFT = TC_SHIFT - 32;
1230 private static final int UAC_MASK = SMASK << UAC_SHIFT;
1231 private static final int UTC_MASK = SMASK << UTC_SHIFT;
1232 private static final int UAC_UNIT = 1 << UAC_SHIFT;
1233 private static final int UTC_UNIT = 1 << UTC_SHIFT;
1234
1235 // masks and units for dealing with e = (int)ctl
1236 private static final int E_MASK = 0x7fffffff; // no STOP_BIT
1237 private static final int E_SEQ = 1 << EC_SHIFT;
1238
1239 // plock bits
1240 private static final int SHUTDOWN = 1 << 31;
1241 private static final int PL_LOCK = 2;
1242 private static final int PL_SIGNAL = 1;
1243 private static final int PL_SPINS = 1 << 8;
1244
1245 // access mode for WorkQueue
1246 static final int LIFO_QUEUE = 0;
1247 static final int FIFO_QUEUE = 1;
1248 static final int SHARED_QUEUE = -1;
1249
1250 // bounds for #steps in scan loop -- must be power 2 minus 1
1251 private static final int MIN_SCAN = 0x1ff; // cover estimation slop
1252 private static final int MAX_SCAN = 0x1ffff; // 4 * max workers
1253
1254 // Instance fields
1255
1256 /*
1257 * Field layout of this class tends to matter more than one would
1258 * like. Runtime layout order is only loosely related to
1259 * declaration order and may differ across JVMs, but the following
1260 * empirically works OK on current JVMs.
1261 */
1262 volatile long stealCount; // collects worker counts
1263 volatile long ctl; // main pool control
1264 volatile int plock; // shutdown status and seqLock
1265 volatile int indexSeed; // worker/submitter index seed
1266 final int config; // mode and parallelism level
1267 WorkQueue[] workQueues; // main registry
1268 final ForkJoinWorkerThreadFactory factory;
1269 final Thread.UncaughtExceptionHandler ueh; // per-worker UEH
1270 final String workerNamePrefix; // to create worker name string
1271
1272 /*
1273 * Acquires the plock lock to protect worker array and related
1274 * updates. This method is called only if an initial CAS on plock
1275 * fails. This acts as a spinLock for normal cases, but falls back
1276 * to builtin monitor to block when (rarely) needed. This would be
1277 * a terrible idea for a highly contended lock, but works fine as
1278 * a more conservative alternative to a pure spinlock. See
1279 * internal ConcurrentHashMap documentation for further
1280 * explanation of nearly the same construction.
1281 */
1282 private int acquirePlock() {
1283 int spins = PL_SPINS, r = 0, ps, nps;
1284 for (;;) {
1285 if (((ps = plock) & PL_LOCK) == 0 &&
1286 U.compareAndSwapInt(this, PLOCK, ps, nps = ps + PL_LOCK))
1287 return nps;
1288 else if (r == 0) { // randomize spins if possible
1289 Thread t = Thread.currentThread(); WorkQueue w; Submitter z;
1290 if ((t instanceof ForkJoinWorkerThread) &&
1291 (w = ((ForkJoinWorkerThread)t).workQueue) != null)
1292 r = w.seed;
1293 else if ((z = submitters.get()) != null)
1294 r = z.seed;
1295 else
1296 r = 1;
1297 }
1298 else if (spins >= 0) {
1299 r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
1300 if (r >= 0)
1301 --spins;
1302 }
1303 else if (U.compareAndSwapInt(this, PLOCK, ps, ps | PL_SIGNAL)) {
1304 synchronized (this) {
1305 if ((plock & PL_SIGNAL) != 0) {
1306 try {
1307 wait();
1308 } catch (InterruptedException ie) {
1309 try {
1310 Thread.currentThread().interrupt();
1311 } catch (SecurityException ignore) {
1312 }
1313 }
1314 }
1315 else
1316 notifyAll();
1317 }
1318 }
1319 }
1320 }
1321
1322 /**
1323 * Unlocks and signals any thread waiting for plock. Called only
1324 * when CAS of seq value for unlock fails.
1325 */
1326 private void releasePlock(int ps) {
1327 plock = ps;
1328 synchronized (this) { notifyAll(); }
1329 }
1330
1331 /**
1332 * Tries to create and start a worker; adjusts counts etc. on failure.
1333 */
1334 private void addWorker() {
1335 ForkJoinWorkerThread wt = null;
1336 try {
1337 (wt = factory.newThread(this)).start();
1338 } catch (Throwable ex) {
1339 deregisterWorker(wt, ex); // adjust on failure
1340 }
1341 }
1342
1343 /**
1344 * Performs secondary initialization, called when plock is zero.
1345 * Creates workQueue array and sets plock to a valid value. The
1346 * lock body must be exception-free (so no try/finally) so we
1347 * optimistically allocate new array outside the lock and throw
1348 * away if (very rarely) not needed. (A similar tactic is used in
1349 * fullExternalPush.) Because the plock seq value can eventually
1350 * wrap around zero, this method harmlessly fails to reinitialize
1351 * if workQueues exists, while still advancing plock.
1352 */
1353 private void initWorkQueuesArray() {
1354 WorkQueue[] ws; int ps;
1355 int p = config & SMASK; // find power of two table size
1356 int n = (p > 1) ? p - 1 : 1; // ensure at least 2 slots
1357 n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
1358 WorkQueue[] nws = new WorkQueue[(n + 1) << 1];
1359 if (((ps = plock) & PL_LOCK) != 0 ||
1360 !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
1361 ps = acquirePlock();
1362 if ((ws = workQueues) == null || ws.length == 0)
1363 workQueues = nws;
1364 int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
1365 if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
1366 releasePlock(nps);
1367 long c; int u;
1368 if ((u = (int)((c = ctl) >>> 32)) < 0 && (int)c == 0) {
1369 long nc = (long)(((u + UTC_UNIT) & UTC_MASK) |
1370 ((u + UAC_UNIT) & UAC_MASK)) << 32;
1371 if (U.compareAndSwapLong(this, CTL, c, nc))
1372 addWorker();
1373 }
1374
1375 }
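/*
 * Worked example of the sizing arithmetic above: for parallelism
 * p = 6, n starts at 5 and the shift-or cascade rounds it up to 7
 * (one less than the next power of two), so the array gets
 * (7 + 1) << 1 = 16 slots; even slots are used for external
 * submission queues and odd slots for worker queues.
 */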
1376
1377 // Registering and deregistering workers
1378
1379 /**
1380 * Callback from ForkJoinWorkerThread to establish and record its
1381 * WorkQueue. To avoid scanning bias due to packing entries in
1382 * front of the workQueues array, we treat the array as a simple
1383 * power-of-two hash table using per-thread seed as hash,
1384 * expanding as needed.
1385 *
1386 * @param wt the worker thread
1387 */
1388 final void registerWorker(ForkJoinWorkerThread wt) {
1389 if (wt != null && wt.workQueue == null) {
1390 int s, ps; // generate a rarely colliding candidate index seed
1391 do {} while (!U.compareAndSwapInt(this, INDEXSEED, s = indexSeed,
1392 s += SEED_INCREMENT) ||
1393 s == 0); // skip 0
1394 WorkQueue w = new WorkQueue(this, wt, config >>> 16, s);
1395 if (((ps = plock) & PL_LOCK) != 0 ||
1396 !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
1397 ps = acquirePlock();
1398 int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
1399 try {
1400 WorkQueue[] ws;
1401 if ((ws = workQueues) != null && wt.workQueue == null) {
1402 int n = ws.length, m = n - 1;
1403 int r = (s << 1) | 1; // use odd-numbered indices
1404 if (ws[r &= m] != null) { // collision
1405 int probes = 0; // step by approx half size
1406 int step = (n <= 4) ? 2 : ((n >>> 1) & EVENMASK) + 2;
1407 while (ws[r = (r + step) & m] != null) {
1408 if (++probes >= n) {
1409 workQueues = ws = Arrays.copyOf(ws, n <<= 1);
1410 m = n - 1;
1411 probes = 0;
1412 }
1413 }
1414 }
1415 w.eventCount = w.poolIndex = r; // volatile write orders
1416 wt.workQueue = ws[r] = w;
1417 }
1418 } finally {
1419 if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
1420 releasePlock(nps);
1421 }
1422 }
1423 }
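/*
 * Example of the hashing above: in a 16-slot array (m = 15) a seed
 * s maps to the odd candidate index r = ((s << 1) | 1) & m; on a
 * collision the probe advances by roughly half the table size,
 * rounded to an even amount (here ((16 >>> 1) & EVENMASK) + 2 = 10),
 * so the index stays odd, and after n failed probes the array is
 * doubled and probing continues in the larger table.
 */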
1424
1425 /**
1426 * Final callback from terminating worker, as well as upon failure
1427 * to construct or start a worker. Removes record of worker from
1428 * array, and adjusts counts. If pool is shutting down, tries to
1429 * complete termination.
1430 *
1431 * @param wt the worker thread or null if construction failed
1432 * @param ex the exception causing failure, or null if none
1433 */
1434 final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) {
1435 WorkQueue w = null;
1436 if (wt != null && (w = wt.workQueue) != null) {
1437 int ps;
1438 w.qlock = -1; // ensure set
1439 long ns = w.nsteals, sc; // collect steal count
1440 do {} while (!U.compareAndSwapLong(this, STEALCOUNT,
1441 sc = stealCount, sc + ns));
1442 if (((ps = plock) & PL_LOCK) != 0 ||
1443 !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
1444 ps = acquirePlock();
1445 int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
1446 try {
1447 int idx = w.poolIndex;
1448 WorkQueue[] ws = workQueues;
1449 if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w)
1450 ws[idx] = null;
1451 } finally {
1452 if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
1453 releasePlock(nps);
1454 }
1455 }
1456
1457 long c; // adjust ctl counts
1458 do {} while (!U.compareAndSwapLong
1459 (this, CTL, c = ctl, (((c - AC_UNIT) & AC_MASK) |
1460 ((c - TC_UNIT) & TC_MASK) |
1461 (c & ~(AC_MASK|TC_MASK)))));
1462
1463 if (!tryTerminate(false, false) && w != null) {
1464 w.cancelAll(); // cancel remaining tasks
1465 if (w.array != null) // suppress signal if never ran
1466 helpSignal(null, 0); // wake up or create replacement
1467 if (ex == null) // help clean refs on way out
1468 ForkJoinTask.helpExpungeStaleExceptions();
1469 }
1470
1471 if (ex != null) // rethrow
1472 ForkJoinTask.rethrow(ex);
1473 }
1474
1475 // Submissions
1476
1477 /**
1478 * Unless shutting down, adds the given task to a submission queue
1479 * at submitter's current queue index (modulo submission
1480 * range). Only the most common path is directly handled in this
1481 * method. All others are relayed to fullExternalPush.
1482 *
1483 * @param task the task. Caller must ensure non-null.
1484 */
1485 final void externalPush(ForkJoinTask<?> task) {
1486 WorkQueue[] ws; WorkQueue q; Submitter z; int m; ForkJoinTask<?>[] a;
1487 if ((z = submitters.get()) != null && plock > 0 &&
1488 (ws = workQueues) != null && (m = (ws.length - 1)) >= 0 &&
1489 (q = ws[m & z.seed & SQMASK]) != null &&
1490 U.compareAndSwapInt(q, QLOCK, 0, 1)) { // lock
1491 int b = q.base, s = q.top, n, an;
1492 if ((a = q.array) != null && (an = a.length) > (n = s + 1 - b)) {
1493 U.putObject(a, (long)(((an - 1) & s) << ASHIFT) + ABASE, task);
1494 q.top = s + 1; // push on to deque
1495 q.qlock = 0;
1496 if (n <= 2)
1497 signalWork(q, 0);
1498 return;
1499 }
1500 q.qlock = 0;
1501 }
1502 fullExternalPush(task);
1503 }
1504
1505 /**
1506 * Full version of externalPush. This method is called, among
1507 * other times, upon the first submission of the first task to the
1508 * pool, so must perform secondary initialization (via
1509 * initWorkQueuesArray). It also detects first submission by an
1510 * external thread by looking up its ThreadLocal, and creates a
1511 * new shared queue if the one at index is empty or contended. The
1512 * lock body must be exception-free (so no try/finally) so we
1513 * optimistically allocate new queues outside the lock and throw
1514 * them away if (very rarely) not needed.
1515 */
1516 private void fullExternalPush(ForkJoinTask<?> task) {
1517 int r = 0;
1518 for (Submitter z = submitters.get();;) {
1519 WorkQueue[] ws; WorkQueue q; int ps, m, k;
1520 if (z == null) {
1521 if (U.compareAndSwapInt(this, INDEXSEED, r = indexSeed,
1522 r += SEED_INCREMENT) && r != 0)
1523 submitters.set(z = new Submitter(r));
1524 }
1525 else if (r == 0) { // move to a different index
1526 r = z.seed;
1527 r ^= r << 13; // same xorshift as WorkQueues
1528 r ^= r >>> 17;
1529 z.seed = r ^ (r << 5);
1530 }
1531 else if ((ps = plock) < 0)
1532 throw new RejectedExecutionException();
1533 else if (ps == 0 || (ws = workQueues) == null ||
1534 (m = ws.length - 1) < 0)
1535 initWorkQueuesArray();
1536 else if ((q = ws[k = r & m & SQMASK]) != null) {
1537 if (q.trySharedPush(task))
1538 return;
1539 else
1540 r = 0; // move on contention
1541 }
1542 else if (((ps = plock) & PL_LOCK) == 0) { // create new queue
1543 q = new WorkQueue(this, null, SHARED_QUEUE, r);
1544 if (((ps = plock) & PL_LOCK) != 0 ||
1545 !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
1546 ps = acquirePlock();
1547 if ((ws = workQueues) != null && k < ws.length && ws[k] == null)
1548 ws[k] = q;
1549 int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
1550 if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
1551 releasePlock(nps);
1552 }
1553 else
1554 r = 0; // try elsewhere while lock held
1555 }
1556 }
1557
1558 // Maintaining ctl counts
1559
1560 /**
1561 * Increments active count; mainly called upon return from blocking.
1562 */
1563 final void incrementActiveCount() {
1564 long c;
1565 do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT));
1566 }
1567
1568 /**
1569 * Tries to create (at most one) or activate (possibly several)
1570 * workers if too few are active. On contention failure, continues
1571 * until at least one worker is signalled or the given queue is
1572 * empty or all workers are active.
1573 *
1574 * @param q if non-null, the queue holding tasks to be signalled
1575 * @param signals the target number of signals (at least one --
1576 * if argument is zero also sets signallee hint if parked).
1577 */
1578 final void signalWork(WorkQueue q, int signals) {
1579 long c; int e, u, i, s; WorkQueue[] ws; WorkQueue w; Thread p;
1580 while ((u = (int)((c = ctl) >>> 32)) < 0) {
1581 if ((e = (int)c) > 0) {
1582 if ((ws = workQueues) != null && ws.length > (i = e & SMASK) &&
1583 (w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) {
1584 long nc = (((long)(w.nextWait & E_MASK)) |
1585 ((long)(u + UAC_UNIT) << 32));
1586 if (U.compareAndSwapLong(this, CTL, c, nc)) {
1587 w.eventCount = (e + E_SEQ) & E_MASK;
1588 if ((p = w.parker) != null) {
1589 if (q != null && signals == 0)
1590 w.hint = q.poolIndex;
1591 U.unpark(p);
1592 }
1593 if (--signals <= 0)
1594 break;
1595 }
1596 if (q != null && (s = q.queueSize()) <= signals &&
1597 (signals = s) <= 0)
1598 break;
1599 }
1600 else
1601 break;
1602 }
1603 else if (e == 0 && (u & SHORT_SIGN) != 0) {
1604 long nc = (long)(((u + UTC_UNIT) & UTC_MASK) |
1605 ((u + UAC_UNIT) & UAC_MASK)) << 32;
1606 if (U.compareAndSwapLong(this, CTL, c, nc)) {
1607 addWorker();
1608 break;
1609 }
1610 }
1611 else
1612 break;
1613 }
1614 }
1615
1616 // Scanning for tasks
1617
1618 /**
1619 * Top-level runloop for workers, called by ForkJoinWorkerThread.run.
1620 */
1621 final void runWorker(WorkQueue w) {
1622 if (w != null) // skip on initialization failure
1623 do { w.runTask(scan(w)); } while (w.qlock >= 0);
1624 }
1625
1626 /**
1627 * Scans for and, if found, returns one task, else possibly
1628 * inactivates the worker. This method operates on single reads of
1629 * volatile state and is designed to be re-invoked continuously,
1630 * in part because it returns upon detecting inconsistencies,
1631 * contention, or state changes that indicate possible success on
1632 * re-invocation.
1633 *
1634 * The scan searches for tasks across queues (starting at a random
1635 * index, and relying on registerWorker to irregularly scatter
1636 * them within array to avoid bias), checking each at least twice.
1637 * The scan terminates upon either finding a non-empty queue, or
1638 * completing the sweep. If the worker is not inactivated, it
1639 * takes and returns a task from this queue. Otherwise, if not
1640 * activated, it signals workers (that may include itself) and
1641 * returns so the caller can retry. It also returns if the
1642 * worker array may have changed during an empty scan. On failure
1643 * to find a task, we take one of the following actions, after
1644 * which the caller will retry calling this method unless
1645 * terminated.
1646 *
1647 * * If pool is terminating, terminate the worker.
1648 *
1649 * * If not already enqueued, try to inactivate and enqueue the
1650 * worker on wait queue. Or, if inactivating has caused the pool
1651 * to be quiescent, relay to idleAwaitWork to check for
1652 * termination and possibly shrink pool.
1653 *
1654 * * If already enqueued and none of the above apply, possibly
1655 * (with 1/2 probability) park awaiting signal, else lingering to
1656 * help scan and signal.
1657 *
1658 * @param w the worker (via its WorkQueue)
1659 * @return a task or null if none found
1660 */
1661 private final ForkJoinTask<?> scan(WorkQueue w) {
1662 WorkQueue[] ws; int m, hint;
1663 int ps = plock; // read plock before ws
1664 if (w != null && (ws = workQueues) != null && (m = ws.length - 1) >= 0) {
1665 int ec = w.eventCount; // ec is negative if inactive
1666 int r = w.seed; r ^= r << 13; r ^= r >>> 17; w.seed = r ^= r << 5;
1667 for (int j = ((m + m + 1) | MIN_SCAN) & MAX_SCAN; ; --j) {
1668 WorkQueue q; ForkJoinTask<?>[] a; int b;
1669 if ((q = ws[(r + j) & m]) != null && (b = q.base) - q.top < 0 &&
1670 (a = q.array) != null) { // probably nonempty
1671 int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
1672 ForkJoinTask<?> t = (ForkJoinTask<?>)
1673 U.getObjectVolatile(a, i);
1674 if (q.base == b && ec >= 0 && t != null &&
1675 U.compareAndSwapObject(a, i, t, null)) {
1676 if ((q.base = b + 1) - q.top < 0)
1677 signalWork(q, 0);
1678 return t; // taken
1679 }
1680 else if (ec < 0 || j < m) { // cannot take or cannot rescan
1681 w.hint = q.poolIndex; // use hint below
1682 break; // let caller retry after signal
1683 }
1684 }
1685 else if (j < 0) { // end of scan; in loop to simplify code
1686 long c, sc; int e, ns;
1687 if ((ns = w.nsteals) != 0) {
1688 if (U.compareAndSwapLong(this, STEALCOUNT,
1689 sc = stealCount, sc + ns))
1690 w.nsteals = 0; // collect steals
1691 }
1692 else if (plock != ps) // ws may have changed
1693 break;
1694 else if ((e = (int)(c = ctl)) < 0)
1695 w.qlock = -1; // pool is terminating
1696 else if (ec >= 0) { // try to enqueue/inactivate
1697 long nc = ((long)ec |
1698 ((c - AC_UNIT) & (AC_MASK|TC_MASK)));
1699 w.nextWait = e; // link and mark inactive
1700 w.hint = -1; // use hint if set while parked
1701 w.eventCount = ec | INT_SIGN;
1702 if (ctl != c ||
1703 !U.compareAndSwapLong(this, CTL, c, nc))
1704 w.eventCount = ec; // unmark on CAS failure
1705 else if ((int)(c >> AC_SHIFT) == 1 - (config & SMASK))
1706 idleAwaitWork(w, nc, c);
1707 }
1708 else if (w.eventCount < 0) { // block
1709 Thread wt = Thread.currentThread();
1710 Thread.interrupted(); // clear status
1711 U.putObject(wt, PARKBLOCKER, this);
1712 w.parker = wt; // emulate LockSupport.park
1713 if (w.eventCount < 0) // recheck
1714 U.park(false, 0L);
1715 w.parker = null;
1716 U.putObject(wt, PARKBLOCKER, null);
1717 }
1718 break;
1719 }
1720 }
1721 if ((hint = w.hint) >= 0) { // help signal
1722 WorkQueue[] vs; WorkQueue v; int k;
1723 w.hint = -1; // suppress resignal
1724 if ((vs = workQueues) != null && hint < vs.length &&
1725 (v = vs[hint]) != null && (k = v.base - v.top) < -1)
1726 signalWork(v, 1 - k);
1727 }
1728 }
1729 return null;
1730 }
1731
1732 /**
1733 * If inactivating worker w has caused the pool to become
1734 * quiescent, checks for pool termination, and, so long as this is
1735 * not the only worker, waits for an event for up to a given
1736 * duration. On timeout, if ctl has not changed, terminates the
1737 * worker, which will in turn wake up another worker to possibly
1738 * repeat this process.
1739 *
1740 * @param w the calling worker
1741 * @param currentCtl the ctl value triggering possible quiescence
1742 * @param prevCtl the ctl value to restore if thread is terminated
1743 */
1744 private void idleAwaitWork(WorkQueue w, long currentCtl, long prevCtl) {
1745 if (w != null && w.eventCount < 0 &&
1746 !tryTerminate(false, false) && (int)prevCtl != 0) {
1747 int dc = -(short)(currentCtl >>> TC_SHIFT);
1748 long parkTime = dc < 0 ? FAST_IDLE_TIMEOUT: (dc + 1) * IDLE_TIMEOUT;
1749 long deadline = System.nanoTime() + parkTime - 100000L; // 1ms slop
1750 Thread wt = Thread.currentThread();
1751 while (ctl == currentCtl) {
1752 Thread.interrupted(); // timed variant of version in scan()
1753 U.putObject(wt, PARKBLOCKER, this);
1754 w.parker = wt;
1755 if (ctl == currentCtl)
1756 U.park(false, parkTime);
1757 w.parker = null;
1758 U.putObject(wt, PARKBLOCKER, null);
1759 if (ctl != currentCtl)
1760 break;
1761 if (deadline - System.nanoTime() <= 0L &&
1762 U.compareAndSwapLong(this, CTL, currentCtl, prevCtl)) {
1763 w.eventCount = (w.eventCount + E_SEQ) | E_MASK;
1764 w.qlock = -1; // shrink
1765 w.hint = -1; // suppress helping
1766 break;
1767 }
1768 }
1769 }
1770 }
1771
1772 /**
1773 * Scans through queues looking for work (optionally, while
1774 * joining a task); if any are present, signals. May return early
1775 * if more signalling is detectably unneeded.
1776 *
1777 * @param task if non-null, return early if done
1778 * @param origin an index to start scan
1779 */
1780 final int helpSignal(ForkJoinTask<?> task, int origin) {
1781 WorkQueue[] ws; WorkQueue q; int m, n, s, u;
1782 if ((ws = workQueues) != null && (m = ws.length - 1) >= 0) {
1783 for (int i = 0; i <= m; ++i) {
1784 if (task != null && (s = task.status) < 0)
1785 return s;
1786 if ((q = ws[(i + origin) & m]) != null &&
1787 (n = q.queueSize()) > 0) {
1788 signalWork(q, n);
1789 if ((u = (int)(ctl >>> 32)) >= 0 || (u >> UAC_SHIFT) >= 0)
1790 break;
1791 }
1792 }
1793 }
1794 return 0;
1795 }
1796
1797 /**
1798 * Tries to locate and execute tasks for a stealer of the given
1799 * task, or in turn one of its stealers. Traces currentSteal ->
1800 * currentJoin links looking for a thread working on a descendant
1801 * of the given task and with a non-empty queue to steal back and
1802 * execute tasks from. The first call to this method upon a
1803 * waiting join will often entail scanning/searching (which is OK
1804 * because the joiner has nothing better to do), but this method
1805 * leaves hints in workers to speed up subsequent calls. The
1806 * implementation is very branchy to cope with potential
1807 * inconsistencies or loops encountering chains that are stale,
1808 * unknown, or so long that they are likely cyclic.
1809 *
1810 * @param joiner the joining worker
1811 * @param task the task to join
1812 * @return 0 if no progress can be made, negative if task
1813 * known complete, else positive
1814 */
1815 private int tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) {
1816 int stat = 0, steps = 0; // bound to avoid cycles
1817 if (joiner != null && task != null) { // hoist null checks
1818 restart: for (;;) {
1819 ForkJoinTask<?> subtask = task; // current target
1820 for (WorkQueue j = joiner, v;;) { // v is stealer of subtask
1821 WorkQueue[] ws; int m, s, h;
1822 if ((s = task.status) < 0) {
1823 stat = s;
1824 break restart;
1825 }
1826 if ((ws = workQueues) == null || (m = ws.length - 1) <= 0)
1827 break restart; // shutting down
1828 if ((v = ws[h = (j.hint | 1) & m]) == null ||
1829 v.currentSteal != subtask) {
1830 for (int origin = h;;) { // find stealer
1831 if (((h = (h + 2) & m) & 15) == 1 &&
1832 (subtask.status < 0 || j.currentJoin != subtask))
1833 continue restart; // occasional staleness check
1834 if ((v = ws[h]) != null &&
1835 v.currentSteal == subtask) {
1836 j.hint = h; // save hint
1837 break;
1838 }
1839 if (h == origin)
1840 break restart; // cannot find stealer
1841 }
1842 }
1843 for (;;) { // help stealer or descend to its stealer
1844 ForkJoinTask[] a; int b;
1845 if (subtask.status < 0) // surround probes with
1846 continue restart; // consistency checks
1847 if ((b = v.base) - v.top < 0 && (a = v.array) != null) {
1848 int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
1849 ForkJoinTask<?> t =
1850 (ForkJoinTask<?>)U.getObjectVolatile(a, i);
1851 if (subtask.status < 0 || j.currentJoin != subtask ||
1852 v.currentSteal != subtask)
1853 continue restart; // stale
1854 stat = 1; // apparent progress
1855 if (t != null && v.base == b &&
1856 U.compareAndSwapObject(a, i, t, null)) {
1857 v.base = b + 1; // help stealer
1858 joiner.runSubtask(t);
1859 }
1860 else if (v.base == b && ++steps == MAX_HELP)
1861 break restart; // v apparently stalled
1862 }
1863 else { // empty -- try to descend
1864 ForkJoinTask<?> next = v.currentJoin;
1865 if (subtask.status < 0 || j.currentJoin != subtask ||
1866 v.currentSteal != subtask)
1867 continue restart; // stale
1868 else if (next == null || ++steps == MAX_HELP)
1869 break restart; // dead-end or maybe cyclic
1870 else {
1871 subtask = next;
1872 j = v;
1873 break;
1874 }
1875 }
1876 }
1877 }
1878 }
1879 }
1880 return stat;
1881 }
1882
1883 /**
1884 * Analog of tryHelpStealer for CountedCompleters. Tries to steal
1885 * and run tasks within the target's computation.
1886 *
1887 * @param task the task to join
1888 * @param mode if shared, exit upon completing any task
1889 * if all workers are active
1890 *
1891 */
1892 private int helpComplete(ForkJoinTask<?> task, int mode) {
1893 WorkQueue[] ws; WorkQueue q; int m, n, s, u;
1894 if (task != null && (ws = workQueues) != null &&
1895 (m = ws.length - 1) >= 0) {
1896 for (int j = 1, origin = j;;) {
1897 if ((s = task.status) < 0)
1898 return s;
1899 if ((q = ws[j & m]) != null && q.pollAndExecCC(task)) {
1900 origin = j;
1901 if (mode == SHARED_QUEUE &&
1902 ((u = (int)(ctl >>> 32)) >= 0 || (u >> UAC_SHIFT) >= 0))
1903 break;
1904 }
1905 else if ((j = (j + 2) & m) == origin)
1906 break;
1907 }
1908 }
1909 return 0;
1910 }
1911
1912 /**
1913 * Tries to decrement active count (sometimes implicitly) and
1914 * possibly release or create a compensating worker in preparation
1915 * for blocking. Fails on contention or termination. Otherwise,
1916 * adds a new thread if no idle workers are available and pool
1917 * may become starved.
1918 */
1919 final boolean tryCompensate() {
1920 int pc = config & SMASK, e, i, tc; long c;
1921 WorkQueue[] ws; WorkQueue w; Thread p;
1922 if ((ws = workQueues) != null && (e = (int)(c = ctl)) >= 0) {
1923 if (e != 0 && (i = e & SMASK) < ws.length &&
1924 (w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) {
1925 long nc = ((long)(w.nextWait & E_MASK) |
1926 (c & (AC_MASK|TC_MASK)));
1927 if (U.compareAndSwapLong(this, CTL, c, nc)) {
1928 w.eventCount = (e + E_SEQ) & E_MASK;
1929 if ((p = w.parker) != null)
1930 U.unpark(p);
1931 return true; // replace with idle worker
1932 }
1933 }
1934 else if ((tc = (short)(c >>> TC_SHIFT)) >= 0 &&
1935 (int)(c >> AC_SHIFT) + pc > 1) {
1936 long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK);
1937 if (U.compareAndSwapLong(this, CTL, c, nc))
1938 return true; // no compensation
1939 }
1940 else if (tc + pc < MAX_CAP) {
1941 long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK);
1942 if (U.compareAndSwapLong(this, CTL, c, nc)) {
1943 addWorker();
1944 return true;
1945 }
1946 }
1947 }
1948 return false;
1949 }
1950
1951 /**
1952 * Helps and/or blocks until the given task is done.
1953 *
1954 * @param joiner the joining worker
1955 * @param task the task
1956 * @return task status on exit
1957 */
1958 final int awaitJoin(WorkQueue joiner, ForkJoinTask<?> task) {
1959 int s = 0;
1960 if (joiner != null && task != null && (s = task.status) >= 0) {
1961 ForkJoinTask<?> prevJoin = joiner.currentJoin;
1962 joiner.currentJoin = task;
1963 do {} while ((s = task.status) >= 0 &&
1964 joiner.queueSize() > 0 &&
1965 joiner.tryRemoveAndExec(task)); // process local tasks
1966 if (s >= 0 && (s = task.status) >= 0 &&
1967 (s = helpSignal(task, joiner.poolIndex)) >= 0 &&
1968 (task instanceof CountedCompleter))
1969 s = helpComplete(task, LIFO_QUEUE);
1970 int k = 0; // to perform pre-block yield for politeness
1971 while (s >= 0 && (s = task.status) >= 0) {
1972 if ((joiner.queueSize() > 0 || // try helping
1973 (s = tryHelpStealer(joiner, task)) == 0) &&
1974 (s = task.status) >= 0) {
1975 if (k < 3) {
1976 if (++k < 3)
1977 s = helpSignal(task, joiner.poolIndex);
1978 else
1979 Thread.yield();
1980 }
1981 else if (!tryCompensate())
1982 k = 0;
1983 else {
1984 if (task.trySetSignal() && (s = task.status) >= 0) {
1985 synchronized (task) {
1986 if (task.status >= 0) {
1987 try { // see ForkJoinTask
1988 task.wait(); // for explanation
1989 } catch (InterruptedException ie) {
1990 }
1991 }
1992 else
1993 task.notifyAll();
1994 }
1995 }
1996 long c; // re-activate
1997 do {} while (!U.compareAndSwapLong
1998 (this, CTL, c = ctl, c + AC_UNIT));
1999 }
2000 }
2001 }
2002 joiner.currentJoin = prevJoin;
2003 }
2004 return s;
2005 }
2006
2007 /**
2008 * Stripped-down variant of awaitJoin used by timed joins. Tries
2009 * to help join only while there is continuous progress. (Caller
2010 * will then enter a timed wait.)
2011 *
2012 * @param joiner the joining worker
2013 * @param task the task
2014 */
2015 final void helpJoinOnce(WorkQueue joiner, ForkJoinTask<?> task) {
2016 int s;
2017 if (joiner != null && task != null && (s = task.status) >= 0) {
2018 ForkJoinTask<?> prevJoin = joiner.currentJoin;
2019 joiner.currentJoin = task;
2020 do {} while ((s = task.status) >= 0 &&
2021 joiner.queueSize() > 0 &&
2022 joiner.tryRemoveAndExec(task));
2023 if (s >= 0 && (s = task.status) >= 0 &&
2024 (s = helpSignal(task, joiner.poolIndex)) >= 0 &&
2025 (task instanceof CountedCompleter))
2026 s = helpComplete(task, LIFO_QUEUE);
2027 if (s >= 0 && joiner.queueSize() == 0) {
2028 do {} while (task.status >= 0 &&
2029 tryHelpStealer(joiner, task) > 0);
2030 }
2031 joiner.currentJoin = prevJoin;
2032 }
2033 }
2034
2035 /**
2036 * Returns a (probably) non-empty steal queue, if one is found
2037 * during a random, then cyclic scan, else null. This method must
2038 * be retried by caller if, by the time it tries to use the queue,
2039 * it is empty.
2040 * @param r a (random) seed for scanning
2041 */
2042 private WorkQueue findNonEmptyStealQueue(int r) {
2043 for (WorkQueue[] ws;;) {
2044 int ps = plock, m, n;
2045 if ((ws = workQueues) == null || (m = ws.length - 1) < 1)
2046 return null;
2047 for (int j = (m + 1) << 2; ;) {
2048 WorkQueue q = ws[(((r + j) << 1) | 1) & m];
2049 if (q != null && (n = q.queueSize()) > 0) {
2050 if (n > 1)
2051 signalWork(q, 0);
2052 return q;
2053 }
2054 else if (--j < 0) {
2055 if (plock == ps)
2056 return null;
2057 break;
2058 }
2059 }
2060 }
2061 }
2062
2063 /**
2064 * Runs tasks until {@code isQuiescent()}. We piggyback on
2065 * active count ctl maintenance, but rather than blocking
2066 * when tasks cannot be found, we rescan until all others cannot
2067 * find tasks either.
2068 */
2069 final void helpQuiescePool(WorkQueue w) {
2070 for (boolean active = true;;) {
2071 ForkJoinTask<?> localTask; // exhaust local queue
2072 while ((localTask = w.nextLocalTask()) != null)
2073 localTask.doExec();
2074 // Similar to loop in scan(), but ignoring submissions
2075 WorkQueue q = findNonEmptyStealQueue(w.nextSeed());
2076 if (q != null) {
2077 ForkJoinTask<?> t; int b;
2078 if (!active) { // re-establish active count
2079 long c;
2080 active = true;
2081 do {} while (!U.compareAndSwapLong
2082 (this, CTL, c = ctl, c + AC_UNIT));
2083 }
2084 if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null)
2085 w.runSubtask(t);
2086 }
2087 else {
2088 long c;
2089 if (active) { // decrement active count without queuing
2090 active = false;
2091 do {} while (!U.compareAndSwapLong
2092 (this, CTL, c = ctl, c -= AC_UNIT));
2093 }
2094 else
2095 c = ctl; // re-increment on exit
2096 if ((int)(c >> AC_SHIFT) + (config & SMASK) == 0) {
2097 do {} while (!U.compareAndSwapLong
2098 (this, CTL, c = ctl, c + AC_UNIT));
2099 break;
2100 }
2101 }
2102 }
2103 }
2104
2105 /**
2106 * Gets and removes a local or stolen task for the given worker.
2107 *
2108 * @return a task, if available
2109 */
2110 final ForkJoinTask<?> nextTaskFor(WorkQueue w) {
2111 for (ForkJoinTask<?> t;;) {
2112 WorkQueue q; int b;
2113 if ((t = w.nextLocalTask()) != null)
2114 return t;
2115 if ((q = findNonEmptyStealQueue(w.nextSeed())) == null)
2116 return null;
2117 if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null)
2118 return t;
2119 }
2120 }
2121
2122 /**
2123 * Returns a cheap heuristic guide for task partitioning when
2124 * programmers, frameworks, tools, or languages have little or no
2125 * idea about task granularity. In essence by offering this
2126 * method, we ask users only about tradeoffs in overhead vs
2127 * expected throughput and its variance, rather than how finely to
2128 * partition tasks.
2129 *
2130 * In a steady state strict (tree-structured) computation, each
2131 * thread makes available for stealing enough tasks for other
2132 * threads to remain active. Inductively, if all threads play by
2133 * the same rules, each thread should make available only a
2134 * constant number of tasks.
2135 *
2136 * The minimum useful constant is just 1. But using a value of 1
2137 * would require immediate replenishment upon each steal to
2138 * maintain enough tasks, which is infeasible. Further,
2139 * partitionings/granularities of offered tasks should minimize
2140 * steal rates, which in general means that threads nearer the top
2141 * of computation tree should generate more than those nearer the
2142 * bottom. In perfect steady state, each thread is at
2143 * approximately the same level of computation tree. However,
2144 * producing extra tasks amortizes the uncertainty of progress and
2145 * diffusion assumptions.
2146 *
2147 * So, users will want to use values larger, but not much larger
2148 * than 1 to both smooth over transient shortages and hedge
2149 * against uneven progress, as traded off against the cost of
2150 * extra task overhead. We leave the user to pick a threshold
2151 * value to compare with the results of this call to guide
2152 * decisions, but recommend values such as 3.
2153 *
2154 * When all threads are active, it is on average OK to estimate
2155 * surplus strictly locally. In steady-state, if one thread is
2156 * maintaining say 2 surplus tasks, then so are others. So we can
2157 * just use estimated queue length. However, this strategy alone
2158 * leads to serious mis-estimates in some non-steady-state
2159 * conditions (ramp-up, ramp-down, other stalls). We can detect
2160 * many of these by further considering the number of "idle"
2161 * threads, that are known to have zero queued tasks, so
2162 * compensate by a factor of (#idle/#active) threads.
2163 *
2164 * Note: The approximation of #busy workers as #active workers is
2165 * not very good under current signalling scheme, and should be
2166 * improved.
2167 */
2168 static int getSurplusQueuedTaskCount() {
2169 Thread t; ForkJoinWorkerThread wt; ForkJoinPool pool; WorkQueue q;
2170 if (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)) {
2171 int p = (pool = (wt = (ForkJoinWorkerThread)t).pool).config & SMASK;
2172 int n = (q = wt.workQueue).top - q.base;
2173 int a = (int)(pool.ctl >> AC_SHIFT) + p;
2174 return n - (a > (p >>>= 1) ? 0 :
2175 a > (p >>>= 1) ? 1 :
2176 a > (p >>>= 1) ? 2 :
2177 a > (p >>>= 1) ? 4 :
2178 8);
2179 }
2180 return 0;
2181 }
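/*
 * Sketch of the intended use of this method inside a recursive
 * task's compute method, using the threshold of 3 suggested above
 * (names such as problemSize, THRESHOLD, and the splitting step
 * are illustrative, not part of this class):
 *
 *   while (problemSize > THRESHOLD &&
 *          getSurplusQueuedTaskCount() <= 3) {
 *       // split off and fork a subtask; keep the rest locally
 *   }
 *   // compute the remaining portion directly
 */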
2182
2183 // Termination
2184
2185 /**
2186 * Possibly initiates and/or completes termination. The caller
2187 * triggering termination runs three passes through workQueues:
2188 * (0) Setting termination status, followed by wakeups of queued
2189 * workers; (1) cancelling all tasks; (2) interrupting lagging
2190 * threads (likely in external tasks, but possibly also blocked in
2191 * joins). Each pass repeats previous steps because of potential
2192 * lagging thread creation.
2193 *
2194 * @param now if true, unconditionally terminate, else only
2195 * if no work and no active workers
2196 * @param enable if true, enable shutdown when next possible
2197 * @return true if now terminating or terminated
2198 */
2199 private boolean tryTerminate(boolean now, boolean enable) {
2200 if (this == commonPool) // cannot shut down
2201 return false;
2202 for (long c;;) {
2203 if (((c = ctl) & STOP_BIT) != 0) { // already terminating
2204 if ((short)(c >>> TC_SHIFT) == -(config & SMASK)) {
2205 synchronized (this) {
2206 notifyAll(); // signal when 0 workers
2207 }
2208 }
2209 return true;
2210 }
2211 if (plock >= 0) { // not yet enabled
2212 int ps;
2213 if (!enable)
2214 return false;
2215 if (((ps = plock) & PL_LOCK) != 0 ||
2216 !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
2217 ps = acquirePlock();
2218 int nps = SHUTDOWN;
2219 if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
2220 releasePlock(nps);
2221 }
2222 if (!now) { // check if idle & no tasks
2223 if ((int)(c >> AC_SHIFT) != -(config & SMASK) ||
2224 hasQueuedSubmissions())
2225 return false;
2226 // Check for unqueued inactive workers. One pass suffices.
2227 WorkQueue[] ws = workQueues; WorkQueue w;
2228 if (ws != null) {
2229 for (int i = 1; i < ws.length; i += 2) {
2230 if ((w = ws[i]) != null && w.eventCount >= 0)
2231 return false;
2232 }
2233 }
2234 }
2235 if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) {
2236 for (int pass = 0; pass < 3; ++pass) {
2237 WorkQueue[] ws = workQueues;
2238 if (ws != null) {
2239 WorkQueue w;
2240 int n = ws.length;
2241 for (int i = 0; i < n; ++i) {
2242 if ((w = ws[i]) != null) {
2243 w.qlock = -1;
2244 if (pass > 0) {
2245 w.cancelAll();
2246 if (pass > 1)
2247 w.interruptOwner();
2248 }
2249 }
2250 }
2251 // Wake up workers parked on event queue
2252 int i, e; long cc; Thread p;
2253 while ((e = (int)(cc = ctl) & E_MASK) != 0 &&
2254 (i = e & SMASK) < n &&
2255 (w = ws[i]) != null) {
2256 long nc = ((long)(w.nextWait & E_MASK) |
2257 ((cc + AC_UNIT) & AC_MASK) |
2258 (cc & (TC_MASK|STOP_BIT)));
2259 if (w.eventCount == (e | INT_SIGN) &&
2260 U.compareAndSwapLong(this, CTL, cc, nc)) {
2261 w.eventCount = (e + E_SEQ) & E_MASK;
2262 w.qlock = -1;
2263 if ((p = w.parker) != null)
2264 U.unpark(p);
2265 }
2266 }
2267 }
2268 }
2269 }
2270 }
2271 }
2272
2273 // external operations on common pool
2274
2275 /**
2276 * Returns common pool queue for a thread that has submitted at
2277 * least one task.
2278 */
2279 static WorkQueue commonSubmitterQueue() {
2280 ForkJoinPool p; WorkQueue[] ws; int m; Submitter z;
2281 return ((z = submitters.get()) != null &&
2282 (p = commonPool) != null &&
2283 (ws = p.workQueues) != null &&
2284 (m = ws.length - 1) >= 0) ?
2285 ws[m & z.seed & SQMASK] : null;
2286 }
2287
2288 /**
2289 * Tries to pop the given task from submitter's queue in common pool.
2290 */
2291 static boolean tryExternalUnpush(ForkJoinTask<?> t) {
2292 ForkJoinPool p; WorkQueue[] ws; WorkQueue q; Submitter z;
2293 ForkJoinTask<?>[] a; int m, s; long j;
2294 if ((z = submitters.get()) != null &&
2295 (p = commonPool) != null &&
2296 (ws = p.workQueues) != null &&
2297 (m = ws.length - 1) >= 0 &&
2298 (q = ws[m & z.seed & SQMASK]) != null &&
2299 (s = q.top) != q.base &&
2300 (a = q.array) != null &&
2301 U.getObjectVolatile
2302 (a, j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE) == t &&
2303 U.compareAndSwapInt(q, QLOCK, 0, 1)) {
2304 if (q.array == a && q.top == s && // recheck
2305 U.compareAndSwapObject(a, j, t, null)) {
2306 q.top = s - 1;
2307 q.qlock = 0;
2308 return true;
2309 }
2310 q.qlock = 0;
2311 }
2312 return false;
2313 }
2314
2315 /**
2316 * Tries to pop and run local tasks within the same computation
2317 * as the given root. On failure, tries to help complete from
2318 * other queues via helpComplete.
2319 */
2320 private void externalHelpComplete(WorkQueue q, ForkJoinTask<?> root) {
2321 ForkJoinTask<?>[] a; int m;
2322 if (q != null && (a = q.array) != null && (m = (a.length - 1)) >= 0 &&
2323 root != null && root.status >= 0) {
2324 for (;;) {
2325 int s, u; Object o; CountedCompleter<?> task = null;
2326 if ((s = q.top) - q.base > 0) {
2327 long j = ((m & (s - 1)) << ASHIFT) + ABASE;
2328 if ((o = U.getObject(a, j)) != null &&
2329 (o instanceof CountedCompleter)) {
2330 CountedCompleter<?> t = (CountedCompleter<?>)o, r = t;
2331 do {
2332 if (r == root) {
2333 if (U.compareAndSwapInt(q, QLOCK, 0, 1)) {
2334 if (q.array == a && q.top == s &&
2335 U.compareAndSwapObject(a, j, t, null)) {
2336 q.top = s - 1;
2337 task = t;
2338 }
2339 q.qlock = 0;
2340 }
2341 break;
2342 }
2343 } while ((r = r.completer) != null);
2344 }
2345 }
2346 if (task != null)
2347 task.doExec();
2348 if (root.status < 0 ||
2349 (u = (int)(ctl >>> 32)) >= 0 || (u >> UAC_SHIFT) >= 0)
2350 break;
2351 if (task == null) {
2352 if (helpSignal(root, q.poolIndex) >= 0)
2353 helpComplete(root, SHARED_QUEUE);
2354 break;
2355 }
2356 }
2357 }
2358 }
2359
2360 /**
2361 * Tries to help execute or signal availability of the given task
2362 * from submitter's queue in common pool.
2363 */
2364 static void externalHelpJoin(ForkJoinTask<?> t) {
2365 // Some hard-to-avoid overlap with tryExternalUnpush
2366 ForkJoinPool p; WorkQueue[] ws; WorkQueue q, w; Submitter z;
2367 ForkJoinTask<?>[] a; int m, s, n; long j;
2368 if (t != null &&
2369 (z = submitters.get()) != null &&
2370 (p = commonPool) != null &&
2371 (ws = p.workQueues) != null &&
2372 (m = ws.length - 1) >= 0 &&
2373 (q = ws[m & z.seed & SQMASK]) != null &&
2374 (a = q.array) != null &&
2375 t.status >= 0) {
2376 if ((s = q.top) != q.base &&
2377 U.getObjectVolatile
2378 (a, j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE) == t &&
2379 U.compareAndSwapInt(q, QLOCK, 0, 1)) {
2380 if (q.array == a && q.top == s &&
2381 U.compareAndSwapObject(a, j, t, null)) {
2382 q.top = s - 1;
2383 q.qlock = 0;
2384 t.doExec();
2385 }
2386 else
2387 q.qlock = 0;
2388 }
2389 if (t.status >= 0) {
2390 if (t instanceof CountedCompleter)
2391 p.externalHelpComplete(q, t);
2392 else
2393 p.helpSignal(t, q.poolIndex);
2394 }
2395 }
2396 }
2397
2398 /**
2399 * Restricted version of helpQuiescePool for external callers
2400 */
2401 static void externalHelpQuiescePool() {
2402 ForkJoinPool p; ForkJoinTask<?> t; WorkQueue q; int b;
2403 if ((p = commonPool) != null &&
2404 (q = p.findNonEmptyStealQueue(1)) != null &&
2405 (b = q.base) - q.top < 0 &&
2406 (t = q.pollAt(b)) != null)
2407 t.doExec();
2408 }
2409
2410 // Exported methods
2411
2412 // Constructors
2413
2414 /**
2415 * Creates a {@code ForkJoinPool} with parallelism equal to {@link
2416 * java.lang.Runtime#availableProcessors}, using the {@linkplain
2417 * #defaultForkJoinWorkerThreadFactory default thread factory},
2418 * no UncaughtExceptionHandler, and non-async LIFO processing mode.
2419 *
2420 * @throws SecurityException if a security manager exists and
2421 * the caller is not permitted to modify threads
2422 * because it does not hold {@link
2423 * java.lang.RuntimePermission}{@code ("modifyThread")}
2424 */
2425 public ForkJoinPool() {
2426 this(Runtime.getRuntime().availableProcessors(),
2427 defaultForkJoinWorkerThreadFactory, null, false);
2428 }
2429
2430 /**
2431 * Creates a {@code ForkJoinPool} with the indicated parallelism
2432 * level, the {@linkplain
2433 * #defaultForkJoinWorkerThreadFactory default thread factory},
2434 * no UncaughtExceptionHandler, and non-async LIFO processing mode.
2435 *
2436 * @param parallelism the parallelism level
2437 * @throws IllegalArgumentException if parallelism less than or
2438 * equal to zero, or greater than implementation limit
2439 * @throws SecurityException if a security manager exists and
2440 * the caller is not permitted to modify threads
2441 * because it does not hold {@link
2442 * java.lang.RuntimePermission}{@code ("modifyThread")}
2443 */
2444 public ForkJoinPool(int parallelism) {
2445 this(parallelism, defaultForkJoinWorkerThreadFactory, null, false);
2446 }
2447
2448 /**
2449 * Creates a {@code ForkJoinPool} with the given parameters.
2450 *
2451 * @param parallelism the parallelism level. For default value,
2452 * use {@link java.lang.Runtime#availableProcessors}.
2453 * @param factory the factory for creating new threads. For default value,
2454 * use {@link #defaultForkJoinWorkerThreadFactory}.
2455 * @param handler the handler for internal worker threads that
2456 * terminate due to unrecoverable errors encountered while executing
2457 * tasks. For default value, use {@code null}.
2458 * @param asyncMode if true,
2459 * establishes local first-in-first-out scheduling mode for forked
2460 * tasks that are never joined. This mode may be more appropriate
2461 * than default locally stack-based mode in applications in which
2462 * worker threads only process event-style asynchronous tasks.
2463 * For default value, use {@code false}.
2464 * @throws IllegalArgumentException if parallelism less than or
2465 * equal to zero, or greater than implementation limit
2466 * @throws NullPointerException if the factory is null
2467 * @throws SecurityException if a security manager exists and
2468 * the caller is not permitted to modify threads
2469 * because it does not hold {@link
2470 * java.lang.RuntimePermission}{@code ("modifyThread")}
2471 */
2472 public ForkJoinPool(int parallelism,
2473 ForkJoinWorkerThreadFactory factory,
2474 Thread.UncaughtExceptionHandler handler,
2475 boolean asyncMode) {
2476 checkPermission();
2477 if (factory == null)
2478 throw new NullPointerException();
2479 if (parallelism <= 0 || parallelism > MAX_CAP)
2480 throw new IllegalArgumentException();
2481 this.factory = factory;
2482 this.ueh = handler;
2483 this.config = parallelism | (asyncMode ? (FIFO_QUEUE << 16) : 0);
2484 long np = (long)(-parallelism); // offset ctl counts
2485 this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
2486 int pn = nextPoolId();
2487 StringBuilder sb = new StringBuilder("ForkJoinPool-");
2488 sb.append(Integer.toString(pn));
2489 sb.append("-worker-");
2490 this.workerNamePrefix = sb.toString();
2491 }
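/*
 * Construction sketch: an application running event-style tasks
 * that are never joined might create an async-mode pool of four
 * workers with the default factory and no handler:
 *
 *   ForkJoinPool pool = new ForkJoinPool
 *       (4, ForkJoinPool.defaultForkJoinWorkerThreadFactory,
 *        null, true);
 */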
2492
2493 /**
2494 * Constructor for common pool, suitable only for static initialization.
2495 * Basically the same as above, but uses smallest possible initial footprint.
2496 */
2497 ForkJoinPool(int parallelism, long ctl,
2498 ForkJoinWorkerThreadFactory factory,
2499 Thread.UncaughtExceptionHandler handler) {
2500 this.config = parallelism;
2501 this.ctl = ctl;
2502 this.factory = factory;
2503 this.ueh = handler;
2504 this.workerNamePrefix = "ForkJoinPool.commonPool-worker-";
2505 }
2506
2507 /**
2508 * Returns the common pool instance.
2509 *
2510 * @return the common pool instance
2511 */
2512 public static ForkJoinPool commonPool() {
2513 // assert commonPool != null : "static init error";
2514 return commonPool;
2515 }
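/*
 * Usage sketch: the common pool can be used directly like any
 * other pool, for example to run a plain Runnable:
 *
 *   ForkJoinPool.commonPool().execute(new Runnable() {
 *       public void run() { System.out.println("hello"); }
 *   });
 */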
2516
2517 // Execution methods
2518
2519 /**
2520 * Performs the given task, returning its result upon completion.
2521 * If the computation encounters an unchecked Exception or Error,
2522 * it is rethrown as the outcome of this invocation. Rethrown
2523 * exceptions behave in the same way as regular exceptions, but,
2524 * when possible, contain stack traces (as displayed for example
2525 * using {@code ex.printStackTrace()}) of both the current thread
2526 * as well as the thread actually encountering the exception;
2527 * minimally only the latter.
2528 *
2529 * @param task the task
2530 * @return the task's result
2531 * @throws NullPointerException if the task is null
2532 * @throws RejectedExecutionException if the task cannot be
2533 * scheduled for execution
2534 */
2535 public <T> T invoke(ForkJoinTask<T> task) {
2536 if (task == null)
2537 throw new NullPointerException();
2538 externalPush(task);
2539 return task.join();
2540 }
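/*
 * Usage sketch (assumes this package's RecursiveTask companion
 * class; the trivial task body is illustrative only):
 *
 *   ForkJoinPool pool = new ForkJoinPool();
 *   int four = pool.invoke(new RecursiveTask<Integer>() {
 *       protected Integer compute() { return 2 + 2; }
 *   });
 */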
2541
2542 /**
2543 * Arranges for (asynchronous) execution of the given task.
2544 *
2545 * @param task the task
2546 * @throws NullPointerException if the task is null
2547 * @throws RejectedExecutionException if the task cannot be
2548 * scheduled for execution
2549 */
2550 public void execute(ForkJoinTask<?> task) {
2551 if (task == null)
2552 throw new NullPointerException();
2553 externalPush(task);
2554 }
2555
2556 // AbstractExecutorService methods
2557
2558 /**
2559 * @throws NullPointerException if the task is null
2560 * @throws RejectedExecutionException if the task cannot be
2561 * scheduled for execution
2562 */
2563 public void execute(Runnable task) {
2564 if (task == null)
2565 throw new NullPointerException();
2566 ForkJoinTask<?> job;
2567 if (task instanceof ForkJoinTask<?>) // avoid re-wrap
2568 job = (ForkJoinTask<?>) task;
2569 else
2570 job = new ForkJoinTask.AdaptedRunnableAction(task);
2571 externalPush(job);
2572 }
2573
2574 /**
2575 * Submits a ForkJoinTask for execution.
2576 *
2577 * @param task the task to submit
2578 * @return the task
2579 * @throws NullPointerException if the task is null
2580 * @throws RejectedExecutionException if the task cannot be
2581 * scheduled for execution
2582 */
2583 public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) {
2584 if (task == null)
2585 throw new NullPointerException();
2586 externalPush(task);
2587 return task;
2588 }
2589
2590 /**
2591 * @throws NullPointerException if the task is null
2592 * @throws RejectedExecutionException if the task cannot be
2593 * scheduled for execution
2594 */
2595 public <T> ForkJoinTask<T> submit(Callable<T> task) {
2596 ForkJoinTask<T> job = new ForkJoinTask.AdaptedCallable<T>(task);
2597 externalPush(job);
2598 return job;
2599 }
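/*
 * Usage sketch (given some ForkJoinPool pool): an external client
 * submits a Callable and later joins the returned task:
 *
 *   ForkJoinTask<Integer> f = pool.submit(new Callable<Integer>() {
 *       public Integer call() { return 42; }
 *   });
 *   int answer = f.join();
 */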
2600
2601 /**
2602 * @throws NullPointerException if the task is null
2603 * @throws RejectedExecutionException if the task cannot be
2604 * scheduled for execution
2605 */
2606 public <T> ForkJoinTask<T> submit(Runnable task, T result) {
2607 ForkJoinTask<T> job = new ForkJoinTask.AdaptedRunnable<T>(task, result);
2608 externalPush(job);
2609 return job;
2610 }
2611
2612 /**
2613 * @throws NullPointerException if the task is null
2614 * @throws RejectedExecutionException if the task cannot be
2615 * scheduled for execution
2616 */
2617 public ForkJoinTask<?> submit(Runnable task) {
2618 if (task == null)
2619 throw new NullPointerException();
2620 ForkJoinTask<?> job;
2621 if (task instanceof ForkJoinTask<?>) // avoid re-wrap
2622 job = (ForkJoinTask<?>) task;
2623 else
2624 job = new ForkJoinTask.AdaptedRunnableAction(task);
2625 externalPush(job);
2626 return job;
2627 }
2628
2629 /**
2630 * @throws NullPointerException {@inheritDoc}
2631 * @throws RejectedExecutionException {@inheritDoc}
2632 */
2633 public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) {
2634 // In previous versions of this class, this method constructed
2635 // a task to run ForkJoinTask.invokeAll, but now external
2636 // invocation of multiple tasks is at least as efficient.
2637 List<ForkJoinTask<T>> fs = new ArrayList<ForkJoinTask<T>>(tasks.size());
2638 // Workaround needed because method wasn't declared with
2639 // wildcards in return type but should have been.
2640 @SuppressWarnings({"unchecked", "rawtypes"})
2641 List<Future<T>> futures = (List<Future<T>>) (List) fs;
2642
2643 boolean done = false;
2644 try {
2645 for (Callable<T> t : tasks) {
2646 ForkJoinTask<T> f = new ForkJoinTask.AdaptedCallable<T>(t);
2647 externalPush(f);
2648 fs.add(f);
2649 }
2650 for (ForkJoinTask<T> f : fs)
2651 f.quietlyJoin();
2652 done = true;
2653 return futures;
2654 } finally {
2655 if (!done)
2656 for (ForkJoinTask<T> f : fs)
2657 f.cancel(false);
2658 }
2659 }
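/*
 * Usage sketch (given a ForkJoinPool pool and a
 * List<Callable<String>> tasks; the caller handles the checked
 * exceptions of Future.get): the returned futures are already
 * complete when this method returns:
 *
 *   List<Future<String>> results = pool.invokeAll(tasks);
 *   for (Future<String> r : results)
 *       System.out.println(r.get());
 */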
2660
2661 /**
2662 * Returns the factory used for constructing new workers.
2663 *
2664 * @return the factory used for constructing new workers
2665 */
2666 public ForkJoinWorkerThreadFactory getFactory() {
2667 return factory;
2668 }
2669
2670 /**
2671 * Returns the handler for internal worker threads that terminate
2672 * due to unrecoverable errors encountered while executing tasks.
2673 *
2674 * @return the handler, or {@code null} if none
2675 */
2676 public Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() {
2677 return ueh;
2678 }
2679
2680 /**
2681 * Returns the targeted parallelism level of this pool.
2682 *
2683 * @return the targeted parallelism level of this pool
2684 */
2685 public int getParallelism() {
2686 return config & SMASK;
2687 }
2688
2689 /**
2690 * Returns the targeted parallelism level of the common pool.
2691 *
2692 * @return the targeted parallelism level of the common pool
2693 */
2694 public static int getCommonPoolParallelism() {
2695 return commonPoolParallelism;
2696 }
2697
2698 /**
2699 * Returns the number of worker threads that have started but not
2700 * yet terminated. The result returned by this method may differ
2701 * from {@link #getParallelism} when threads are created to
2702 * maintain parallelism when others are cooperatively blocked.
2703 *
2704 * @return the number of worker threads
2705 */
2706 public int getPoolSize() {
2707 return (config & SMASK) + (short)(ctl >>> TC_SHIFT);
2708 }
2709
2710 /**
2711 * Returns {@code true} if this pool uses local first-in-first-out
2712 * scheduling mode for forked tasks that are never joined.
2713 *
2714 * @return {@code true} if this pool uses async mode
2715 */
2716 public boolean getAsyncMode() {
2717 return (config >>> 16) == FIFO_QUEUE;
2718 }
2719
2720 /**
2721 * Returns an estimate of the number of worker threads that are
2722 * not blocked waiting to join tasks or for other managed
2723 * synchronization. This method may overestimate the
2724 * number of running threads.
2725 *
2726 * @return the number of worker threads
2727 */
2728 public int getRunningThreadCount() {
2729 int rc = 0;
2730 WorkQueue[] ws; WorkQueue w;
2731 if ((ws = workQueues) != null) {
2732 for (int i = 1; i < ws.length; i += 2) {
2733 if ((w = ws[i]) != null && w.isApparentlyUnblocked())
2734 ++rc;
2735 }
2736 }
2737 return rc;
2738 }
2739
2740 /**
2741 * Returns an estimate of the number of threads that are currently
2742 * stealing or executing tasks. This method may overestimate the
2743 * number of active threads.
2744 *
2745 * @return the number of active threads
2746 */
2747 public int getActiveThreadCount() {
2748 int r = (config & SMASK) + (int)(ctl >> AC_SHIFT);
2749 return (r <= 0) ? 0 : r; // suppress momentarily negative values
2750 }
2751
2752 /**
2753 * Returns {@code true} if all worker threads are currently idle.
2754 * An idle worker is one that cannot obtain a task to execute
2755 * because none are available to steal from other threads, and
2756 * there are no pending submissions to the pool. This method is
2757 * conservative; it might not return {@code true} immediately upon
2758 * idleness of all threads, but will eventually become true if
2759 * threads remain inactive.
2760 *
2761 * @return {@code true} if all threads are currently idle
2762 */
2763 public boolean isQuiescent() {
2764 return (int)(ctl >> AC_SHIFT) + (config & SMASK) == 0;
2765 }
2766
2767 /**
2768 * Returns an estimate of the total number of tasks stolen from
2769 * one thread's work queue by another. The reported value
2770 * underestimates the actual total number of steals when the pool
2771 * is not quiescent. This value may be useful for monitoring and
2772 * tuning fork/join programs: in general, steal counts should be
2773 * high enough to keep threads busy, but low enough to avoid
2774 * overhead and contention across threads.
2775 *
2776 * @return the number of steals
2777 */
2778 public long getStealCount() {
2779 long count = stealCount;
2780 WorkQueue[] ws; WorkQueue w;
2781 if ((ws = workQueues) != null) {
2782 for (int i = 1; i < ws.length; i += 2) {
2783 if ((w = ws[i]) != null)
2784 count += w.nsteals;
2785 }
2786 }
2787 return count;
2788 }
2789
2790 /**
2791 * Returns an estimate of the total number of tasks currently held
2792 * in queues by worker threads (but not including tasks submitted
2793 * to the pool that have not begun executing). This value is only
2794 * an approximation, obtained by iterating across all threads in
2795 * the pool. This method may be useful for tuning task
2796 * granularities.
2797 *
2798 * @return the number of queued tasks
2799 */
2800 public long getQueuedTaskCount() {
2801 long count = 0;
2802 WorkQueue[] ws; WorkQueue w;
2803 if ((ws = workQueues) != null) {
2804 for (int i = 1; i < ws.length; i += 2) {
2805 if ((w = ws[i]) != null)
2806 count += w.queueSize();
2807 }
2808 }
2809 return count;
2810 }
2811
2812 /**
2813 * Returns an estimate of the number of tasks submitted to this
2814 * pool that have not yet begun executing. This method may take
2815 * time proportional to the number of submissions.
2816 *
2817 * @return the number of queued submissions
2818 */
2819 public int getQueuedSubmissionCount() {
2820 int count = 0;
2821 WorkQueue[] ws; WorkQueue w;
2822 if ((ws = workQueues) != null) {
2823 for (int i = 0; i < ws.length; i += 2) {
2824 if ((w = ws[i]) != null)
2825 count += w.queueSize();
2826 }
2827 }
2828 return count;
2829 }
2830
2831 /**
2832 * Returns {@code true} if there are any tasks submitted to this
2833 * pool that have not yet begun executing.
2834 *
2835 * @return {@code true} if there are any queued submissions
2836 */
2837 public boolean hasQueuedSubmissions() {
2838 WorkQueue[] ws; WorkQueue w;
2839 if ((ws = workQueues) != null) {
2840 for (int i = 0; i < ws.length; i += 2) {
2841 if ((w = ws[i]) != null && w.queueSize() != 0)
2842 return true;
2843 }
2844 }
2845 return false;
2846 }
2847
2848 /**
2849 * Removes and returns the next unexecuted submission if one is
2850 * available. This method may be useful in extensions to this
2851 * class that re-assign work in systems with multiple pools.
2852 *
2853 * @return the next submission, or {@code null} if none
2854 */
2855 protected ForkJoinTask<?> pollSubmission() {
2856 WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
2857 if ((ws = workQueues) != null) {
2858 for (int i = 0; i < ws.length; i += 2) {
2859 if ((w = ws[i]) != null && (t = w.poll()) != null)
2860 return t;
2861 }
2862 }
2863 return null;
2864 }
2865
2866 /**
2867 * Removes all available unexecuted submitted and forked tasks
2868 * from scheduling queues and adds them to the given collection,
2869 * without altering their execution status. These may include
2870 * artificially generated or wrapped tasks. This method is
2871 * designed to be invoked only when the pool is known to be
2872 * quiescent. Invocations at other times may not remove all
2873 * tasks. A failure encountered while attempting to add elements
2874 * to collection {@code c} may result in elements being in
2875 * neither, either or both collections when the associated
2876 * exception is thrown. The behavior of this operation is
2877 * undefined if the specified collection is modified while the
2878 * operation is in progress.
2879 *
2880 * @param c the collection to transfer elements into
2881 * @return the number of elements transferred
2882 */
2883 protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) {
2884 int count = 0;
2885 WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
2886 if ((ws = workQueues) != null) {
2887 for (int i = 0; i < ws.length; ++i) {
2888 if ((w = ws[i]) != null) {
2889 while ((t = w.poll()) != null) {
2890 c.add(t);
2891 ++count;
2892 }
2893 }
2894 }
2895 }
2896 return count;
2897 }
2898
2899 /**
2900 * Returns a string identifying this pool, as well as its state,
2901 * including indications of run state, parallelism level, and
2902 * worker and task counts.
2903 *
2904 * @return a string identifying this pool, as well as its state
2905 */
2906 public String toString() {
2907 // Use a single pass through workQueues to collect counts
2908 long qt = 0L, qs = 0L; int rc = 0;
2909 long st = stealCount;
2910 long c = ctl;
2911 WorkQueue[] ws; WorkQueue w;
2912 if ((ws = workQueues) != null) {
2913 for (int i = 0; i < ws.length; ++i) {
2914 if ((w = ws[i]) != null) {
2915 int size = w.queueSize();
2916 if ((i & 1) == 0)
2917 qs += size;
2918 else {
2919 qt += size;
2920 st += w.nsteals;
2921 if (w.isApparentlyUnblocked())
2922 ++rc;
2923 }
2924 }
2925 }
2926 }
2927 int pc = (config & SMASK);
2928 int tc = pc + (short)(c >>> TC_SHIFT);
2929 int ac = pc + (int)(c >> AC_SHIFT);
2930 if (ac < 0) // ignore transient negative
2931 ac = 0;
2932 String level;
2933 if ((c & STOP_BIT) != 0)
2934 level = (tc == 0) ? "Terminated" : "Terminating";
2935 else
2936 level = plock < 0 ? "Shutting down" : "Running";
2937 return super.toString() +
2938 "[" + level +
2939 ", parallelism = " + pc +
2940 ", size = " + tc +
2941 ", active = " + ac +
2942 ", running = " + rc +
2943 ", steals = " + st +
2944 ", tasks = " + qt +
2945 ", submissions = " + qs +
2946 "]";
2947 }
2948
2949 /**
2950 * Possibly initiates an orderly shutdown in which previously
2951 * submitted tasks are executed, but no new tasks will be
2952 * accepted. Invocation has no effect on execution state if this
2953 * is the {@link #commonPool()}, and no additional effect if
2954 * already shut down. Tasks that are in the process of being
2955 * submitted concurrently during the course of this method may or
2956 * may not be rejected.
2957 *
2958 * @throws SecurityException if a security manager exists and
2959 * the caller is not permitted to modify threads
2960 * because it does not hold {@link
2961 * java.lang.RuntimePermission}{@code ("modifyThread")}
2962 */
2963 public void shutdown() {
2964 checkPermission();
2965 tryTerminate(false, true);
2966 }
2967
2968 /**
2969 * Possibly attempts to cancel and/or stop all tasks, and reject
2970 * all subsequently submitted tasks. Invocation has no effect on
2971 * execution state if this is the {@link #commonPool()}, and no
2972 * additional effect if already shut down. Otherwise, tasks that
2973 * are in the process of being submitted or executed concurrently
2974 * during the course of this method may or may not be
2975 * rejected. This method cancels both existing and unexecuted
2976 * tasks, in order to permit termination in the presence of task
2977 * dependencies. So the method always returns an empty list
2978 * (unlike the case for some other Executors).
2979 *
2980 * @return an empty list
2981 * @throws SecurityException if a security manager exists and
2982 * the caller is not permitted to modify threads
2983 * because it does not hold {@link
2984 * java.lang.RuntimePermission}{@code ("modifyThread")}
2985 */
2986 public List<Runnable> shutdownNow() {
2987 checkPermission();
2988 tryTerminate(true, true);
2989 return Collections.emptyList();
2990 }
2991
2992 /**
2993 * Returns {@code true} if all tasks have completed following shut down.
2994 *
2995 * @return {@code true} if all tasks have completed following shut down
2996 */
2997 public boolean isTerminated() {
2998 long c = ctl;
2999 return ((c & STOP_BIT) != 0L &&
3000 (short)(c >>> TC_SHIFT) == -(config & SMASK));
3001 }
3002
3003 /**
3004 * Returns {@code true} if the process of termination has
3005 * commenced but not yet completed. This method may be useful for
3006 * debugging. A return of {@code true} reported a sufficient
3007 * period after shutdown may indicate that submitted tasks have
3008 * ignored or suppressed interruption, or are waiting for IO,
3009 * causing this executor not to properly terminate. (See the
3010 * advisory notes for class {@link ForkJoinTask} stating that
3011 * tasks should not normally entail blocking operations. But if
3012 * they do, they must abort them on interrupt.)
3013 *
3014 * @return {@code true} if terminating but not yet terminated
3015 */
3016 public boolean isTerminating() {
3017 long c = ctl;
3018 return ((c & STOP_BIT) != 0L &&
3019 (short)(c >>> TC_SHIFT) != -(config & SMASK));
3020 }
3021
3022 /**
3023 * Returns {@code true} if this pool has been shut down.
3024 *
3025 * @return {@code true} if this pool has been shut down
3026 */
3027 public boolean isShutdown() {
3028 return plock < 0;
3029 }
3030
3031 /**
3032 * Blocks until all tasks have completed execution after a
3033 * shutdown request, or the timeout occurs, or the current thread
3034 * is interrupted, whichever happens first. Note that because the
3035 * {@link #commonPool()} never terminates until program shutdown,
3036 * this method will always time out when applied to the common pool.
3037 *
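* <p>A typical pattern for other pools (illustrative only; {@code pool}
* is any non-common pool) is to initiate shutdown and then wait a
* bounded time for termination:
* <pre> {@code
* pool.shutdown();
* if (!pool.awaitTermination(60L, TimeUnit.SECONDS))
*   pool.shutdownNow();}</pre>
*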
3038 * @param timeout the maximum time to wait
3039 * @param unit the time unit of the timeout argument
3040 * @return {@code true} if this executor terminated and
3041 * {@code false} if the timeout elapsed before termination
3042 * @throws InterruptedException if interrupted while waiting
3043 */
3044 public boolean awaitTermination(long timeout, TimeUnit unit)
3045 throws InterruptedException {
3046 long nanos = unit.toNanos(timeout);
3047 if (isTerminated())
3048 return true;
3049 long startTime = System.nanoTime();
3050 boolean terminated = false;
3051 synchronized (this) {
3052 for (long waitTime = nanos, millis = 0L;;) {
3053 if ((terminated = isTerminated()) ||
3054 waitTime <= 0L ||
3055 (millis = unit.toMillis(waitTime)) <= 0L)
3056 break;
3057 wait(millis);
3058 waitTime = nanos - (System.nanoTime() - startTime);
3059 }
3060 }
3061 return terminated;
3062 }
3063
3064 /**
3065 * Interface for extending managed parallelism for tasks running
3066 * in {@link ForkJoinPool}s.
3067 *
3068 * <p>A {@code ManagedBlocker} provides two methods. Method
3069 * {@code isReleasable} must return {@code true} if blocking is
3070 * not necessary. Method {@code block} blocks the current thread
3071 * if necessary (perhaps internally invoking {@code isReleasable}
3072 * before actually blocking). These actions are performed by any
3073 * thread invoking {@link ForkJoinPool#managedBlock}. The
3074 * unusual methods in this API accommodate synchronizers that may,
3075 * but don't usually, block for long periods. Similarly, they
3076 * allow more efficient internal handling of cases in which
3077 * additional workers may be, but usually are not, needed to
3078 * ensure sufficient parallelism. Toward this end,
3079 * implementations of method {@code isReleasable} must be amenable
3080 * to repeated invocation.
3081 *
3082 * <p>For example, here is a ManagedBlocker based on a
3083 * ReentrantLock:
3084 * <pre> {@code
3085 * class ManagedLocker implements ManagedBlocker {
3086 * final ReentrantLock lock;
3087 * boolean hasLock = false;
3088 * ManagedLocker(ReentrantLock lock) { this.lock = lock; }
3089 * public boolean block() {
3090 * if (!hasLock)
3091 * lock.lock();
3092 * return true;
3093 * }
3094 * public boolean isReleasable() {
3095 * return hasLock || (hasLock = lock.tryLock());
3096 * }
3097 * }}</pre>
3098 *
3099 * <p>Here is a class that possibly blocks waiting for an
3100 * item on a given queue:
3101 * <pre> {@code
3102 * class QueueTaker<E> implements ManagedBlocker {
3103 * final BlockingQueue<E> queue;
3104 * volatile E item = null;
3105 * QueueTaker(BlockingQueue<E> q) { this.queue = q; }
3106 * public boolean block() throws InterruptedException {
3107 * if (item == null)
3108 * item = queue.take();
3109 * return true;
3110 * }
3111 * public boolean isReleasable() {
3112 * return item != null || (item = queue.poll()) != null;
3113 * }
3114 * public E getItem() { // call after pool.managedBlock completes
3115 * return item;
3116 * }
3117 * }}</pre>
3118 */
3119 public static interface ManagedBlocker {
3120 /**
3121 * Possibly blocks the current thread, for example waiting for
3122 * a lock or condition.
3123 *
3124 * @return {@code true} if no additional blocking is necessary
3125 * (i.e., if isReleasable would return true)
3126 * @throws InterruptedException if interrupted while waiting
3127 * (the method is not required to do so, but is allowed to)
3128 */
3129 boolean block() throws InterruptedException;
3130
3131 /**
3132 * Returns {@code true} if blocking is unnecessary.
3133 */
3134 boolean isReleasable();
3135 }
3136
3137 /**
3138 * Blocks in accord with the given blocker. If the current thread
3139 * is a {@link ForkJoinWorkerThread}, this method possibly
3140 * arranges for a spare thread to be activated if necessary to
3141 * ensure sufficient parallelism while the current thread is blocked.
3142 *
3143 * <p>If the caller is not a {@link ForkJoinTask}, this method is
3144 * behaviorally equivalent to
3145 * <pre> {@code
3146 * while (!blocker.isReleasable())
3147 * if (blocker.block())
3148 * return;
3149 * }</pre>
3150 *
3151 * If the caller is a {@code ForkJoinTask}, then the pool may
3152 * first be expanded to ensure parallelism, and later adjusted.
3153 *
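* <p>For example, a task could wait for an element of a
* {@code BlockingQueue} using the {@code QueueTaker} class sketched
* above (illustrative only; assumes the caller may propagate
* {@code InterruptedException}):
* <pre> {@code
* QueueTaker<String> taker = new QueueTaker<String>(queue);
* ForkJoinPool.managedBlock(taker);
* String item = taker.getItem();}</pre>
*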
3154 * @param blocker the blocker
3155 * @throws InterruptedException if blocker.block did so
3156 */
3157 public static void managedBlock(ManagedBlocker blocker)
3158 throws InterruptedException {
3159 Thread t = Thread.currentThread();
3160 if (t instanceof ForkJoinWorkerThread) {
3161 ForkJoinPool p = ((ForkJoinWorkerThread)t).pool;
3162 while (!blocker.isReleasable()) { // variant of helpSignal
3163 WorkQueue[] ws; WorkQueue q; int m, n, u;
3164 if ((ws = p.workQueues) != null && (m = ws.length - 1) >= 0) {
3165 for (int i = 0; i <= m; ++i) {
3166 if (blocker.isReleasable())
3167 return;
3168 if ((q = ws[i]) != null && (n = q.queueSize()) > 0) {
3169 p.signalWork(q, n);
3170 if ((u = (int)(p.ctl >>> 32)) >= 0 ||
3171 (u >> UAC_SHIFT) >= 0)
3172 break;
3173 }
3174 }
3175 }
3176 if (p.tryCompensate()) {
3177 try {
3178 do {} while (!blocker.isReleasable() &&
3179 !blocker.block());
3180 } finally {
3181 p.incrementActiveCount();
3182 }
3183 break;
3184 }
3185 }
3186 }
3187 else {
3188 do {} while (!blocker.isReleasable() &&
3189 !blocker.block());
3190 }
3191 }
3192
3193 // AbstractExecutorService overrides. These rely on undocumented
3194 // fact that ForkJoinTask.adapt returns ForkJoinTasks that also
3195 // implement RunnableFuture.
3196
3197 protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
3198 return new ForkJoinTask.AdaptedRunnable<T>(runnable, value);
3199 }
3200
3201 protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) {
3202 return new ForkJoinTask.AdaptedCallable<T>(callable);
3203 }
3204
3205 // Unsafe mechanics
3206 private static final sun.misc.Unsafe U;
3207 private static final long CTL;
3208 private static final long PARKBLOCKER;
3209 private static final int ABASE;
3210 private static final int ASHIFT;
3211 private static final long STEALCOUNT;
3212 private static final long PLOCK;
3213 private static final long INDEXSEED;
3214 private static final long QLOCK;
3215
3216 static {
3217 int s; // initialize field offsets for CAS etc
3218 try {
3219 U = getUnsafe();
3220 Class<?> k = ForkJoinPool.class;
3221 CTL = U.objectFieldOffset
3222 (k.getDeclaredField("ctl"));
3223 STEALCOUNT = U.objectFieldOffset
3224 (k.getDeclaredField("stealCount"));
3225 PLOCK = U.objectFieldOffset
3226 (k.getDeclaredField("plock"));
3227 INDEXSEED = U.objectFieldOffset
3228 (k.getDeclaredField("indexSeed"));
3229 Class<?> tk = Thread.class;
3230 PARKBLOCKER = U.objectFieldOffset
3231 (tk.getDeclaredField("parkBlocker"));
3232 Class<?> wk = WorkQueue.class;
3233 QLOCK = U.objectFieldOffset
3234 (wk.getDeclaredField("qlock"));
3235 Class<?> ak = ForkJoinTask[].class;
3236 ABASE = U.arrayBaseOffset(ak);
3237 s = U.arrayIndexScale(ak);
3238 ASHIFT = 31 - Integer.numberOfLeadingZeros(s);
3239 } catch (Exception e) {
3240 throw new Error(e);
3241 }
3242 if ((s & (s-1)) != 0)
3243 throw new Error("data type scale not a power of two");
3244
3245 submitters = new ThreadLocal<Submitter>();
3246 ForkJoinWorkerThreadFactory fac = defaultForkJoinWorkerThreadFactory =
3247 new DefaultForkJoinWorkerThreadFactory();
3248 /*
3249 * Establish common pool parameters. For extra caution,
3250 * computations to set up common pool state are here; the
3251 * constructor just assigns these values to fields.
3252 */
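// For example, these common pool defaults can be selected on the
// command line (illustrative values; any positive parallelism and any
// accessible, instantiable class names work):
//   java -Djava.util.concurrent.ForkJoinPool.common.parallelism=4 \
//        -Djava.util.concurrent.ForkJoinPool.common.threadFactory=p.MyFactory \
//        p.MyApp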
3253
3254 int par = 0;
3255 Thread.UncaughtExceptionHandler handler = null;
3256 try { // TBD: limit or report ignored exceptions?
3257 String pp = System.getProperty
3258 ("java.util.concurrent.ForkJoinPool.common.parallelism");
3259 String hp = System.getProperty
3260 ("java.util.concurrent.ForkJoinPool.common.exceptionHandler");
3261 String fp = System.getProperty
3262 ("java.util.concurrent.ForkJoinPool.common.threadFactory");
3263 if (fp != null)
3264 fac = ((ForkJoinWorkerThreadFactory)ClassLoader.
3265 getSystemClassLoader().loadClass(fp).newInstance());
3266 if (hp != null)
3267 handler = ((Thread.UncaughtExceptionHandler)ClassLoader.
3268 getSystemClassLoader().loadClass(hp).newInstance());
3269 if (pp != null)
3270 par = Integer.parseInt(pp);
3271 } catch (Exception ignore) {
3272 }
3273
3274 if (par <= 0)
3275 par = Runtime.getRuntime().availableProcessors();
3276 if (par > MAX_CAP)
3277 par = MAX_CAP;
3278 commonPoolParallelism = par;
3279 long np = (long)(-par); // precompute initial ctl value
3280 long ct = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
3281
3282 commonPool = new ForkJoinPool(par, ct, fac, handler);
3283 modifyThreadPermission = new RuntimePermission("modifyThread");
3284 }
3285
3286 /**
3287 * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
3288 * Replace with a simple call to Unsafe.getUnsafe when integrating
3289 * into a jdk.
3290 *
3291 * @return a sun.misc.Unsafe
3292 */
3293 private static sun.misc.Unsafe getUnsafe() {
3294 try {
3295 return sun.misc.Unsafe.getUnsafe();
3296 } catch (SecurityException se) {
3297 try {
3298 return java.security.AccessController.doPrivileged
3299 (new java.security
3300 .PrivilegedExceptionAction<sun.misc.Unsafe>() {
3301 public sun.misc.Unsafe run() throws Exception {
3302 java.lang.reflect.Field f = sun.misc
3303 .Unsafe.class.getDeclaredField("theUnsafe");
3304 f.setAccessible(true);
3305 return (sun.misc.Unsafe) f.get(null);
3306 }});
3307 } catch (java.security.PrivilegedActionException e) {
3308 throw new RuntimeException("Could not initialize intrinsics",
3309 e.getCause());
3310 }
3311 }
3312 }
3313
3314 }