root/jsr166/jsr166/src/jsr166e/ForkJoinPool.java
Revision: 1.17
Committed: Mon Nov 19 01:04:24 2012 UTC (11 years, 6 months ago) by jsr166
Branch: MAIN
Changes since 1.16: +1 -1 lines
Log Message:
javadoc style

File Contents

1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/publicdomain/zero/1.0/
5 */
6
7 package jsr166e;
8
9 import java.util.ArrayList;
10 import java.util.Arrays;
11 import java.util.Collection;
12 import java.util.Collections;
13 import java.util.List;
14 import java.util.concurrent.AbstractExecutorService;
15 import java.util.concurrent.Callable;
16 import java.util.concurrent.ExecutorService;
17 import java.util.concurrent.Future;
18 import java.util.concurrent.RejectedExecutionException;
19 import java.util.concurrent.RunnableFuture;
20 import java.util.concurrent.ThreadLocalRandom;
21 import java.util.concurrent.TimeUnit;
22
23 /**
24 * An {@link ExecutorService} for running {@link ForkJoinTask}s.
25 * A {@code ForkJoinPool} provides the entry point for submissions
26 * from non-{@code ForkJoinTask} clients, as well as management and
27 * monitoring operations.
28 *
29 * <p>A {@code ForkJoinPool} differs from other kinds of {@link
30 * ExecutorService} mainly by virtue of employing
31 * <em>work-stealing</em>: all threads in the pool attempt to find and
32 * execute tasks submitted to the pool and/or created by other active
33 * tasks (eventually blocking waiting for work if none exist). This
34 * enables efficient processing when most tasks spawn other subtasks
35 * (as do most {@code ForkJoinTask}s), as well as when many small
36 * tasks are submitted to the pool from external clients. Especially
37 * when setting <em>asyncMode</em> to true in constructors, {@code
38 * ForkJoinPool}s may also be appropriate for use with event-style
39 * tasks that are never joined.
40 *
41 * <p>A static {@link #commonPool} is available and appropriate for
42 * most applications. The common pool is used by any ForkJoinTask that
43 * is not explicitly submitted to a specified pool. Using the common
44 * pool normally reduces resource usage (its threads are slowly
45 * reclaimed during periods of non-use, and reinstated upon subsequent
46 * use).
47 *
48 * <p>For applications that require separate or custom pools, a {@code
49 * ForkJoinPool} may be constructed with a given target parallelism
50 * level; by default, equal to the number of available processors. The
51 * pool attempts to maintain enough active (or available) threads by
52 * dynamically adding, suspending, or resuming internal worker
53 * threads, even if some tasks are stalled waiting to join
54 * others. However, no such adjustments are guaranteed in the face of
55 * blocked IO or other unmanaged synchronization. The nested {@link
56 * ManagedBlocker} interface enables extension of the kinds of
57 * synchronization accommodated.
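 *
 * <p>As a minimal sketch (assuming only the two-method contract of
 * {@code ManagedBlocker}: {@code isReleasable()} and {@code block()}),
 * a managed blocker wrapping a simple sleep might look like:
 *
 * <pre> {@code
 * class SleepyBlocker implements ForkJoinPool.ManagedBlocker {
 *   volatile boolean done;
 *   public boolean isReleasable() { return done; }
 *   public boolean block() throws InterruptedException {
 *     if (!done) { Thread.sleep(10); done = true; }
 *     return done;
 *   }
 * }
 * // used as: ForkJoinPool.managedBlock(new SleepyBlocker());
 * }</pre>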
58 *
59 * <p>In addition to execution and lifecycle control methods, this
60 * class provides status check methods (for example
61 * {@link #getStealCount}) that are intended to aid in developing,
62 * tuning, and monitoring fork/join applications. Also, method
63 * {@link #toString} returns indications of pool state in a
64 * convenient form for informal monitoring.
65 *
66 * <p>As is the case with other ExecutorServices, there are three
67 * main task execution methods summarized in the following table.
68 * These are designed to be used primarily by clients not already
69 * engaged in fork/join computations in the current pool. The main
70 * forms of these methods accept instances of {@code ForkJoinTask},
71 * but overloaded forms also allow mixed execution of plain {@code
72 * Runnable}- or {@code Callable}- based activities as well. However,
73 * tasks that are already executing in a pool should normally instead
74 * use the within-computation forms listed in the table unless using
75 * async event-style tasks that are not usually joined, in which case
76 * there is little difference among choice of methods.
77 *
78 * <table BORDER CELLPADDING=3 CELLSPACING=1>
79 * <tr>
80 * <td></td>
81 * <td ALIGN=CENTER> <b>Call from non-fork/join clients</b></td>
82 * <td ALIGN=CENTER> <b>Call from within fork/join computations</b></td>
83 * </tr>
84 * <tr>
85 * <td> <b>Arrange async execution</b></td>
86 * <td> {@link #execute(ForkJoinTask)}</td>
87 * <td> {@link ForkJoinTask#fork}</td>
88 * </tr>
89 * <tr>
90 * <td> <b>Await and obtain result</b></td>
91 * <td> {@link #invoke(ForkJoinTask)}</td>
92 * <td> {@link ForkJoinTask#invoke}</td>
93 * </tr>
94 * <tr>
95 * <td> <b>Arrange exec and obtain Future</b></td>
96 * <td> {@link #submit(ForkJoinTask)}</td>
97 * <td> {@link ForkJoinTask#fork} (ForkJoinTasks <em>are</em> Futures)</td>
98 * </tr>
99 * </table>
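 *
 * <p>For illustration only (a minimal sketch, not a complete program), a
 * non-fork/join client might wrap a plain {@code Callable} via {@code
 * ForkJoinTask.adapt} and use the first row of the table as follows:
 *
 * <pre> {@code
 * ForkJoinPool pool = new ForkJoinPool();      // or the common pool
 * ForkJoinTask<Integer> task = ForkJoinTask.adapt(new Callable<Integer>() {
 *   public Integer call() { return 6 * 7; }
 * });
 * pool.execute(task);           // arrange async execution
 * int answer = task.join();     // ForkJoinTasks are Futures
 * pool.shutdown();
 * }</pre>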
100 *
101 * <p>The common pool is by default constructed with default
102 * parameters, but these may be controlled by setting three {@link
103 * System#getProperty properties} with prefix {@code
104 * java.util.concurrent.ForkJoinPool.common}: {@code parallelism} --
105 * an integer greater than zero, {@code threadFactory} -- the class
106 * name of a {@link ForkJoinWorkerThreadFactory}, and {@code
107 * exceptionHandler} -- the class name of a {@link
108 * java.lang.Thread.UncaughtExceptionHandler
109 * Thread.UncaughtExceptionHandler}. Upon any error in establishing
110 * these settings, default parameters are used.
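 *
 * <p>For example (a sketch only; the values shown are arbitrary and the
 * factory class name is hypothetical), because these properties are read
 * during static initialization they must be set very early, typically via
 * {@code -D} command-line flags or at the top of {@code main}:
 *
 * <pre> {@code
 * System.setProperty(
 *   "java.util.concurrent.ForkJoinPool.common.parallelism", "4");
 * System.setProperty(
 *   "java.util.concurrent.ForkJoinPool.common.threadFactory",
 *   "com.example.MyWorkerThreadFactory");
 * }</pre>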
111 *
112 * <p><b>Implementation notes</b>: This implementation restricts the
113 * maximum number of running threads to 32767. Attempts to create
114 * pools with greater than the maximum number result in
115 * {@code IllegalArgumentException}.
116 *
117 * <p>This implementation rejects submitted tasks (that is, by throwing
118 * {@link RejectedExecutionException}) only when the pool is shut down
119 * or internal resources have been exhausted.
120 *
121 * @since 1.7
122 * @author Doug Lea
123 */
124 public class ForkJoinPool extends AbstractExecutorService {
125
126 /*
127 * Implementation Overview
128 *
129 * This class and its nested classes provide the main
130 * functionality and control for a set of worker threads:
131 * Submissions from non-FJ threads enter into submission queues.
132 * Workers take these tasks and typically split them into subtasks
133 * that may be stolen by other workers. Preference rules give
134 * first priority to processing tasks from their own queues (LIFO
135 * or FIFO, depending on mode), then to randomized FIFO steals of
136 * tasks in other queues.
137 *
138 * WorkQueues
139 * ==========
140 *
141 * Most operations occur within work-stealing queues (in nested
142 * class WorkQueue). These are special forms of Deques that
143 * support only three of the four possible end-operations -- push,
144 * pop, and poll (aka steal), under the further constraints that
145 * push and pop are called only from the owning thread (or, as
146 * extended here, under a lock), while poll may be called from
147 * other threads. (If you are unfamiliar with them, you probably
148 * want to read Herlihy and Shavit's book "The Art of
149 * Multiprocessor Programming", chapter 16 describing these in
150 * more detail before proceeding.) The main work-stealing queue
151 * design is roughly similar to those in the papers "Dynamic
152 * Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005
153 * (http://research.sun.com/scalable/pubs/index.html) and
154 * "Idempotent work stealing" by Michael, Saraswat, and Vechev,
155 * PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186).
156 * The main differences ultimately stem from GC requirements that
157 * we null out taken slots as soon as we can, to maintain as small
158 * a footprint as possible even in programs generating huge
159 * numbers of tasks. To accomplish this, we shift the CAS
160 * arbitrating pop vs poll (steal) from being on the indices
161 * ("base" and "top") to the slots themselves. So, both a
162 * successful pop and poll mainly entail a CAS of a slot from
163 * non-null to null. Because we rely on CASes of references, we
164 * do not need tag bits on base or top. They are simple ints as
165 * used in any circular array-based queue (see for example
166 * ArrayDeque). Updates to the indices must still be ordered in a
167 * way that guarantees that top == base means the queue is empty,
168 * but otherwise may err on the side of possibly making the queue
169 * appear nonempty when a push, pop, or poll have not fully
170 * committed. Note that this means that the poll operation,
171 * considered individually, is not wait-free. One thief cannot
172 * successfully continue until another in-progress one (or, if
173 * previously empty, a push) completes. However, in the
174 * aggregate, we ensure at least probabilistic non-blockingness.
175 * If an attempted steal fails, a thief always chooses a different
176 * random victim target to try next. So, in order for one thief to
177 * progress, it suffices for any in-progress poll or new push on
178 * any empty queue to complete. (This is why we normally use
179 * method pollAt and its variants that try once at the apparent
180 * base index, else consider alternative actions, rather than
181 * method poll.)
182 *
183 * This approach also enables support of a user mode in which local
184 * task processing is in FIFO, not LIFO order, simply by using
185 * poll rather than pop. This can be useful in message-passing
186 * frameworks in which tasks are never joined. However, neither
187 * mode considers affinities, loads, cache localities, etc., so
188 * these modes rarely provide the best possible performance on a given
189 * machine, but they portably provide good throughput by averaging over
190 * these factors. (Further, even if we did try to use such
191 * information, we do not usually have a basis for exploiting it.
192 * For example, some sets of tasks profit from cache affinities,
193 * but others are harmed by cache pollution effects.)
194 *
195 * WorkQueues are also used in a similar way for tasks submitted
196 * to the pool. We cannot mix these tasks in the same queues used
197 * for work-stealing (this would contaminate lifo/fifo
198 * processing). Instead, we randomly associate submission queues
199 * with submitting threads, using a form of hashing. The
200 * ThreadLocal Submitter class contains a value initially used as
201 * a hash code for choosing existing queues, but may be randomly
202 * repositioned upon contention with other submitters. In
203 * essence, submitters act like workers except that they are
204 * restricted to executing local tasks that they submitted (or in
205 * the case of CountedCompleters, others with the same root task).
206 * However, because most shared/external queue operations are more
207 * expensive than internal, and because, at steady state, external
208 * submitters will compete for CPU with workers, ForkJoinTask.join
209 * and related methods disable them from repeatedly helping to
210 * process tasks if all workers are active. Insertion of tasks in
211 * shared mode requires a lock (mainly to protect in the case of
212 * resizing) but we use only a simple spinlock (using bits in
213 * field qlock), because submitters encountering a busy queue move
214 * on to try or create other queues -- they block only when
215 * creating and registering new queues.
216 *
217 * Management
218 * ==========
219 *
220 * The main throughput advantages of work-stealing stem from
221 * decentralized control -- workers mostly take tasks from
222 * themselves or each other. We cannot negate this in the
223 * implementation of other management responsibilities. The main
224 * tactic for avoiding bottlenecks is packing nearly all
225 * essentially atomic control state into two volatile variables
226 * that are by far most often read (not written) as status and
227 * consistency checks.
228 *
229 * Field "ctl" contains 64 bits holding all the information needed
230 * to atomically decide to add, inactivate, enqueue (on an event
231 * queue), dequeue, and/or re-activate workers. To enable this
232 * packing, we restrict maximum parallelism to (1<<15)-1 (which is
233 * far in excess of normal operating range) to allow ids, counts,
234 * and their negations (used for thresholding) to fit into 16bit
235 * fields.
236 *
237 * Field "plock" is a form of sequence lock with a saturating
238 * shutdown bit (similarly for per-queue "qlocks"), mainly
239 * protecting updates to the workQueues array, as well as to
240 * enable shutdown. When used as a lock, it is normally only very
241 * briefly held, so is nearly always available after at most a
242 * brief spin, but we use a monitor-based backup strategy to
243 * block when needed.
244 *
245 * Recording WorkQueues. WorkQueues are recorded in the
246 * "workQueues" array that is created upon first use and expanded
247 * if necessary. Updates to the array while recording new workers
248 * and unrecording terminated ones are protected from each other
249 * by a lock but the array is otherwise concurrently readable, and
250 * accessed directly. To simplify index-based operations, the
251 * array size is always a power of two, and all readers must
252 * tolerate null slots. Worker queues are at odd indices. Shared
253 * (submission) queues are at even indices, up to a maximum of 64
254 * slots, to limit growth even if array needs to expand to add
255 * more workers. Grouping them together in this way simplifies and
256 * speeds up task scanning.
257 *
258 * All worker thread creation is on-demand, triggered by task
259 * submissions, replacement of terminated workers, and/or
260 * compensation for blocked workers. However, all other support
261 * code is set up to work with other policies. To ensure that we
262 * do not hold on to worker references that would prevent GC, ALL
263 * accesses to workQueues are via indices into the workQueues
264 * array (which is one source of some of the messy code
265 * constructions here). In essence, the workQueues array serves as
266 * a weak reference mechanism. Thus for example the wait queue
267 * field of ctl stores indices, not references. Access to the
268 * workQueues in associated methods (for example signalWork) must
269 * both index-check and null-check the IDs. All such accesses
270 * ignore bad IDs by returning out early from what they are doing,
271 * since this can only be associated with termination, in which
272 * case it is OK to give up. All uses of the workQueues array
273 * also check that it is non-null (even if previously
274 * non-null). This allows nulling during termination, which is
275 * currently not necessary, but remains an option for
276 * resource-revocation-based shutdown schemes. It also helps
277 * reduce JIT issuance of uncommon-trap code, which tends to
278 * unnecessarily complicate control flow in some methods.
279 *
280 * Event Queuing. Unlike HPC work-stealing frameworks, we cannot
281 * let workers spin indefinitely scanning for tasks when none can
282 * be found immediately, and we cannot start/resume workers unless
283 * there appear to be tasks available. On the other hand, we must
284 * quickly prod them into action when new tasks are submitted or
285 * generated. In many usages, ramp-up time to activate workers is
286 * the main limiting factor in overall performance (this is
287 * compounded at program start-up by JIT compilation and
288 * allocation). So we try to streamline this as much as possible.
289 * We park/unpark workers after placing in an event wait queue
290 * when they cannot find work. This "queue" is actually a simple
291 * Treiber stack, headed by the "id" field of ctl, plus a 15bit
292 * counter value (that reflects the number of times a worker has
293 * been inactivated) to avoid ABA effects (we need only as many
294 * version numbers as worker threads). Successors are held in
295 * field WorkQueue.nextWait. Queuing deals with several intrinsic
296 * races, mainly that a task-producing thread can miss seeing (and
297 * signalling) another thread that gave up looking for work but
298 * has not yet entered the wait queue. We solve this by requiring
299 * a full sweep of all workers (via repeated calls to method
300 * scan()) both before and after a newly waiting worker is added
301 * to the wait queue. During a rescan, the worker might release
302 * some other queued worker rather than itself, which has the same
303 * net effect. Because enqueued workers may actually be rescanning
304 * rather than waiting, we set and clear the "parker" field of
305 * WorkQueues to reduce unnecessary calls to unpark. (This
306 * requires a secondary recheck to avoid missed signals.) Note
307 * the unusual conventions about Thread.interrupts surrounding
308 * parking and other blocking: Because interrupts are used solely
309 * to alert threads to check termination, which is checked anyway
310 * upon blocking, we clear status (using Thread.interrupted)
311 * before any call to park, so that park does not immediately
312 * return due to status being set via some other unrelated call to
313 * interrupt in user code.
314 *
315 * Signalling. We create or wake up workers only when there
316 * appears to be at least one task they might be able to find and
317 * execute. However, many other threads may notice the same task
318 * and each signal to wake up a thread that might take it. So in
319 * general, pools will be over-signalled. When a submission is
320 * added or another worker adds a task to a queue that is
321 * apparently empty, they signal waiting workers (or trigger
322 * creation of new ones if fewer than the given parallelism level
323 * -- see signalWork). These primary signals are buttressed by
324 * signals whenever other threads scan for work or do not have a
325 * task to process. On most platforms, signalling (unpark)
326 * overhead time is noticeably long, and the time between
327 * signalling a thread and it actually making progress can be very
328 * noticeably long, so it is worth offloading these delays from
329 * critical paths as much as possible.
330 *
331 * Trimming workers. To release resources after periods of lack of
332 * use, a worker starting to wait when the pool is quiescent will
333 * time out and terminate if the pool has remained quiescent for a
334 * given period -- a short period if there are more threads than
335 * parallelism, longer as the number of threads decreases. This
336 * will slowly propagate, eventually terminating all workers after
337 * periods of non-use.
338 *
339 * Shutdown and Termination. A call to shutdownNow atomically sets
340 * a plock bit and then (non-atomically) sets each worker's
341 * qlock status, cancels all unprocessed tasks, and wakes up
342 * all waiting workers. Detecting whether termination should
343 * commence after a non-abrupt shutdown() call requires more work
344 * and bookkeeping. We need consensus about quiescence (i.e., that
345 * there is no more work). The active count provides a primary
346 * indication but non-abrupt shutdown still requires a rechecking
347 * scan for any workers that are inactive but not queued.
348 *
349 * Joining Tasks
350 * =============
351 *
352 * Any of several actions may be taken when one worker is waiting
353 * to join a task stolen (or always held) by another. Because we
354 * are multiplexing many tasks on to a pool of workers, we can't
355 * just let them block (as in Thread.join). We also cannot just
356 * reassign the joiner's run-time stack with another and replace
357 * it later, which would be a form of "continuation", that even if
358 * possible is not necessarily a good idea since we sometimes need
359 * both an unblocked task and its continuation to progress.
360 * Instead we combine two tactics:
361 *
362 * Helping: Arranging for the joiner to execute some task that it
363 * would be running if the steal had not occurred.
364 *
365 * Compensating: Unless there are already enough live threads,
366 * method tryCompensate() may create or re-activate a spare
367 * thread to compensate for blocked joiners until they unblock.
368 *
369 * A third form (implemented in tryRemoveAndExec) amounts to
370 * helping a hypothetical compensator: If we can readily tell that
371 * a possible action of a compensator is to steal and execute the
372 * task being joined, the joining thread can do so directly,
373 * without the need for a compensation thread (although at the
374 * expense of larger run-time stacks, but the tradeoff is
375 * typically worthwhile).
376 *
377 * The ManagedBlocker extension API can't use helping so relies
378 * only on compensation in method awaitBlocker.
379 *
380 * The algorithm in tryHelpStealer entails a form of "linear"
381 * helping: Each worker records (in field currentSteal) the most
382 * recent task it stole from some other worker. Plus, it records
383 * (in field currentJoin) the task it is currently actively
384 * joining. Method tryHelpStealer uses these markers to try to
385 * find a worker to help (i.e., steal back a task from and execute
386 * it) that could hasten completion of the actively joined task.
387 * In essence, the joiner executes a task that would be on its own
388 * local deque had the to-be-joined task not been stolen. This may
389 * be seen as a conservative variant of the approach in Wagner &
390 * Calder "Leapfrogging: a portable technique for implementing
391 * efficient futures" SIGPLAN Notices, 1993
392 * (http://portal.acm.org/citation.cfm?id=155354). It differs in
393 * that: (1) We only maintain dependency links across workers upon
394 * steals, rather than use per-task bookkeeping. This sometimes
395 * requires a linear scan of workQueues array to locate stealers,
396 * but often doesn't because stealers leave hints (that may become
397 * stale/wrong) of where to locate them. A stealHint is only a
398 * hint because a worker might have had multiple steals and the
399 * hint records only one of them (usually the most current).
400 * Hinting isolates cost to when it is needed, rather than adding
401 * to per-task overhead. (2) It is "shallow", ignoring nesting
402 * and potentially cyclic mutual steals. (3) It is intentionally
403 * racy: field currentJoin is updated only while actively joining,
404 * which means that we miss links in the chain during long-lived
405 * tasks, GC stalls etc (which is OK since blocking in such cases
406 * is usually a good idea). (4) We bound the number of attempts
407 * to find work (see MAX_HELP) and fall back to suspending the
408 * worker and if necessary replacing it with another.
409 *
410 * Helping actions for CountedCompleters are much simpler: Method
411 * helpComplete can take and execute any task with the same root
412 * as the task being waited on. However, this still entails some
413 * traversal of completer chains, so is less efficient than using
414 * CountedCompleters without explicit joins.
415 *
416 * It is impossible to keep exactly the target parallelism number
417 * of threads running at any given time. Determining the
418 * existence of conservatively safe helping targets, the
419 * availability of already-created spares, and the apparent need
420 * to create new spares are all racy, so we rely on multiple
421 * retries of each. Compensation in the apparent absence of
422 * helping opportunities is challenging to control on JVMs, where
423 * GC and other activities can stall progress of tasks that in
424 * turn stall out many other dependent tasks, without us being
425 * able to determine whether they will ever require compensation.
426 * Even though work-stealing otherwise encounters little
427 * degradation in the presence of more threads than cores,
428 * aggressively adding new threads in such cases entails risk of
429 * unwanted positive feedback control loops in which more threads
430 * cause more dependent stalls (as well as delayed progress of
431 * unblocked threads to the point that we know they are available)
432 * leading to more situations requiring more threads, and so
433 * on. This aspect of control can be seen as an (analytically
434 * intractable) game with an opponent that may choose the worst
435 * (for us) active thread to stall at any time. We take several
436 * precautions to bound losses (and thus bound gains), mainly in
437 * methods tryCompensate and awaitJoin.
438 *
439 * Common Pool
440 * ===========
441 *
442 * The static commonPool always exists after static
443 * initialization. Since it (or any other created pool) need
444 * never be used, we minimize initial construction overhead and
445 * footprint to the setup of about a dozen fields, with no nested
446 * allocation. Most bootstrapping occurs within method
447 * fullExternalPush during the first submission to the pool.
448 *
449 * When external threads submit to the common pool, they can
450 * perform some subtask processing (see externalHelpJoin and
451 * related methods). We do not need to record whether these
452 * submissions are to the common pool -- if not, externalHelpJoin
453 * returns quickly (at the most helping to signal some common pool
454 * workers). These submitters would otherwise be blocked waiting
455 * for completion, so the extra effort (with liberally sprinkled
456 * task status checks) in inapplicable cases amounts to an odd
457 * form of limited spin-wait before blocking in ForkJoinTask.join.
458 *
459 * Style notes
460 * ===========
461 *
462 * There is a lot of representation-level coupling among classes
463 * ForkJoinPool, ForkJoinWorkerThread, and ForkJoinTask. The
464 * fields of WorkQueue maintain data structures managed by
465 * ForkJoinPool, so are directly accessed. There is little point
466 * trying to reduce this, since any associated future changes in
467 * representations will need to be accompanied by algorithmic
468 * changes anyway. Several methods intrinsically sprawl because
469 * they must accumulate sets of consistent reads of volatiles held
470 * in local variables. Methods signalWork() and scan() are the
471 * main bottlenecks, so are especially heavily
472 * micro-optimized/mangled. There are lots of inline assignments
473 * (of form "while ((local = field) != 0)") which are usually the
474 * simplest way to ensure the required read orderings (which are
475 * sometimes critical). This leads to a "C"-like style of listing
476 * declarations of these locals at the heads of methods or blocks.
477 * There are several occurrences of the unusual "do {} while
478 * (!cas...)" which is the simplest way to force an update of a
479 * CAS'ed variable. There are also other coding oddities (including
480 * several unnecessary-looking hoisted null checks) that help
481 * some methods perform reasonably even when interpreted (not
482 * compiled).
483 *
484 * The order of declarations in this file is:
485 * (1) Static utility functions
486 * (2) Nested (static) classes
487 * (3) Static fields
488 * (4) Fields, along with constants used when unpacking some of them
489 * (5) Internal control methods
490 * (6) Callbacks and other support for ForkJoinTask methods
491 * (7) Exported methods
492 * (8) Static block initializing statics in minimally dependent order
493 */
494
495 // Static utilities
496
497 /**
498 * If there is a security manager, makes sure caller has
499 * permission to modify threads.
500 */
501 private static void checkPermission() {
502 SecurityManager security = System.getSecurityManager();
503 if (security != null)
504 security.checkPermission(modifyThreadPermission);
505 }
506
507 // Nested classes
508
509 /**
510 * Factory for creating new {@link ForkJoinWorkerThread}s.
511 * A {@code ForkJoinWorkerThreadFactory} must be defined and used
512 * for {@code ForkJoinWorkerThread} subclasses that extend base
513 * functionality or initialize threads with different contexts.
514 */
515 public static interface ForkJoinWorkerThreadFactory {
516 /**
517 * Returns a new worker thread operating in the given pool.
518 *
519 * @param pool the pool this thread works in
520 * @throws NullPointerException if the pool is null
521 */
522 public ForkJoinWorkerThread newThread(ForkJoinPool pool);
523 }
524
525 /**
526 * Default ForkJoinWorkerThreadFactory implementation; creates a
527 * new ForkJoinWorkerThread.
528 */
529 static class DefaultForkJoinWorkerThreadFactory
530 implements ForkJoinWorkerThreadFactory {
531 public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
532 return new ForkJoinWorkerThread(pool);
533 }
534 }
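
/*
 * Illustrative sketch (not used by this file): a subclassing factory of the
 * kind described in the ForkJoinWorkerThreadFactory javadoc, where
 * NamedWorkerThread is a hypothetical ForkJoinWorkerThread subclass that
 * only customizes the thread name, might look like:
 *
 *   static final class NamedWorkerThread extends ForkJoinWorkerThread {
 *       NamedWorkerThread(ForkJoinPool pool, String name) {
 *           super(pool);
 *           setName(name);
 *       }
 *   }
 *   static final class NamedThreadFactory
 *       implements ForkJoinWorkerThreadFactory {
 *       public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
 *           return new NamedWorkerThread(pool, "my-fj-worker");
 *       }
 *   }
 */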
535
536 /**
537 * Class for artificial tasks that are used to replace the target
538 * of local joins if they are removed from an interior queue slot
539 * in WorkQueue.tryRemoveAndExec. We don't need the proxy to
540 * actually do anything beyond having a unique identity.
541 */
542 static final class EmptyTask extends ForkJoinTask<Void> {
543 private static final long serialVersionUID = -7721805057305804111L;
544 EmptyTask() { status = ForkJoinTask.NORMAL; } // force done
545 public final Void getRawResult() { return null; }
546 public final void setRawResult(Void x) {}
547 public final boolean exec() { return true; }
548 }
549
550 /**
551 * Queues supporting work-stealing as well as external task
552 * submission. See above for main rationale and algorithms.
553 * Implementation relies heavily on "Unsafe" intrinsics
554 * and selective use of "volatile":
555 *
556 * Field "base" is the index (mod array.length) of the least valid
557 * queue slot, which is always the next position to steal (poll)
558 * from if nonempty. Reads and writes require volatile orderings
559 * but not CAS, because updates are only performed after slot
560 * CASes.
561 *
562 * Field "top" is the index (mod array.length) of the next queue
563 * slot to push to or pop from. It is written only by owner thread
564 * for push, or under lock for external/shared push, and accessed
565 * by other threads only after reading (volatile) base. Both top
566 * and base are allowed to wrap around on overflow, but (top -
567 * base) (or more commonly -(base - top) to force volatile read of
568 * base before top) still estimates size. The lock ("qlock") is
569 * forced to -1 on termination, causing all further lock attempts
570 * to fail. (Note: we don't need CAS for termination state because
571 * upon pool shutdown, all shared-queues will stop being used
572 * anyway.) Nearly all lock bodies are set up so that exceptions
573 * within lock bodies are "impossible" (modulo JVM errors that
574 * would cause failure anyway.)
575 *
576 * The array slots are read and written using the emulation of
577 * volatiles/atomics provided by Unsafe. Insertions must in
578 * general use putOrderedObject as a form of releasing store to
579 * ensure that all writes to the task object are ordered before
580 * its publication in the queue. All removals entail a CAS to
581 * null. The array is always a power of two. To ensure safety of
582 * Unsafe array operations, all accesses perform explicit null
583 * checks and implicit bounds checks via power-of-two masking.
584 *
585 * In addition to basic queuing support, this class contains
586 * fields described elsewhere to control execution. It turns out
587 * to work better memory-layout-wise to include them in this class
588 * rather than a separate class.
589 *
590 * Performance on most platforms is very sensitive to placement of
591 * instances of both WorkQueues and their arrays -- we absolutely
592 * do not want multiple WorkQueue instances or multiple queue
593 * arrays sharing cache lines. (It would be best for queue objects
594 * and their arrays to share, but there is nothing available to
595 * help arrange that). Unfortunately, because they are recorded
596 * in a common array, WorkQueue instances are often moved to be
597 * adjacent by garbage collectors. To reduce impact, we use field
598 * padding that works OK on common platforms; this effectively
599 * trades off slightly slower average field access for the sake of
600 * avoiding really bad worst-case access. (Until better JVM
601 * support is in place, this padding is dependent on transient
602 * properties of JVM field layout rules.) We also take care in
603 * allocating, sizing and resizing the array. Non-shared queue
604 * arrays are initialized by workers before use. Others are
605 * allocated on first use.
606 */
607 static final class WorkQueue {
608 /**
609 * Capacity of work-stealing queue array upon initialization.
610 * Must be a power of two; at least 4, but should be larger to
611 * reduce or eliminate cacheline sharing among queues.
612 * Currently, it is much larger, as a partial workaround for
613 * the fact that JVMs often place arrays in locations that
614 * share GC bookkeeping (especially cardmarks) such that
615 * per-write accesses encounter serious memory contention.
616 */
617 static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
618
619 /**
620 * Maximum size for queue arrays. Must be a power of two less
621 * than or equal to 1 << (31 - width of array entry) to ensure
622 * lack of wraparound of index calculations, but defined to a
623 * value a bit less than this to help users trap runaway
624 * programs before saturating systems.
625 */
626 static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M
627
628 int seed; // for random scanning; initialize nonzero
629 volatile int eventCount; // encoded inactivation count; < 0 if inactive
630 int nextWait; // encoded record of next event waiter
631 final int mode; // lifo, fifo, or shared
632 int nsteals; // cumulative number of steals
633 int poolIndex; // index of this queue in pool (or 0)
634 int stealHint; // index of most recent known stealer
635 volatile int qlock; // 1: locked, -1: terminate; else 0
636 volatile int base; // index of next slot for poll
637 int top; // index of next slot for push
638 ForkJoinTask<?>[] array; // the elements (initially unallocated)
639 final ForkJoinPool pool; // the containing pool (may be null)
640 final ForkJoinWorkerThread owner; // owning thread or null if shared
641 volatile Thread parker; // == owner during call to park; else null
642 volatile ForkJoinTask<?> currentJoin; // task being joined in awaitJoin
643 ForkJoinTask<?> currentSteal; // current non-local task being executed
644 // Heuristic padding to ameliorate unfortunate memory placements
645 Object p00, p01, p02, p03, p04, p05, p06, p07;
646 Object p08, p09, p0a, p0b, p0c, p0d, p0e;
647
648 WorkQueue(ForkJoinPool pool, ForkJoinWorkerThread owner, int mode) {
649 this.mode = mode;
650 this.pool = pool;
651 this.owner = owner;
652 // Place indices in the center of array (that is not yet allocated)
653 base = top = INITIAL_QUEUE_CAPACITY >>> 1;
654 }
655
656 /**
657 * Pushes a task. Call only by owner in unshared queues.
658 * Cases needing resizing or rejection are relayed to fullPush
659 * (that also handles shared queues).
660 *
661 * @param task the task. Caller must ensure non-null.
662 * @throws RejectedExecutionException if array cannot be resized
663 */
664 final void push(ForkJoinTask<?> task) {
665 ForkJoinPool p; ForkJoinTask<?>[] a;
666 int s = top, n;
667 if ((a = array) != null && a.length > (n = s + 1 - base)) {
668 U.putOrderedObject
669 (a, (((a.length - 1) & s) << ASHIFT) + ABASE, task);
670 top = s + 1;
671 if (n <= 1 && (p = pool) != null)
672 p.signalWork(this, 1);
673 }
674 else
675 fullPush(task, true);
676 }
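
/*
 * A note on the Unsafe indexing used above and throughout this class: for
 * slot index s of array a (whose length is a power of two), the slot's byte
 * offset is ABASE + (((a.length - 1) & s) << ASHIFT), i.e. the array base
 * offset plus the masked index scaled by the per-element size. As a worked
 * sketch (assuming a 4-byte reference scale, so ASHIFT == 2): with
 * a.length == 8192 and s == 8195, the masked index is 3 and the offset is
 * ABASE + 12. putOrderedObject is used as a releasing store so that writes
 * initializing the task are ordered before its publication in the slot
 * (see the WorkQueue class comment above).
 */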
677
678 /**
679 * Pushes a task if lock is free and array is either big
680 * enough or can be resized to be big enough. Note: a
681 * specialization of a common fast path of this method is in
682 * ForkJoinPool.externalPush. When called from a FJWT queue,
683 * this can fail only if the pool has been shut down or
684 * an out-of-memory error occurs.
685 *
686 * @param task the task. Caller must ensure non-null.
687 * @param owned if true, throw RJE on failure
688 */
689 final boolean fullPush(ForkJoinTask<?> task, boolean owned) {
690 ForkJoinPool p; ForkJoinTask<?>[] a;
691 if (owned) {
692 if (qlock < 0) // must be shutting down
693 throw new RejectedExecutionException();
694 }
695 else if (!U.compareAndSwapInt(this, QLOCK, 0, 1))
696 return false;
697 try {
698 int s = top, oldLen, len;
699 if ((a = array) == null)
700 a = array = new ForkJoinTask<?>[len=INITIAL_QUEUE_CAPACITY];
701 else if ((oldLen = a.length) > s + 1 - base)
702 len = oldLen;
703 else if ((len = oldLen << 1) > MAXIMUM_QUEUE_CAPACITY)
704 throw new RejectedExecutionException("Capacity exceeded");
705 else {
706 int oldMask, b;
707 ForkJoinTask<?>[] oldA = a;
708 a = array = new ForkJoinTask<?>[len];
709 if ((oldMask = oldLen - 1) >= 0 && s - (b = base) > 0) {
710 int mask = len - 1;
711 do {
712 ForkJoinTask<?> x;
713 int oldj = ((b & oldMask) << ASHIFT) + ABASE;
714 int j = ((b & mask) << ASHIFT) + ABASE;
715 x = (ForkJoinTask<?>)
716 U.getObjectVolatile(oldA, oldj);
717 if (x != null &&
718 U.compareAndSwapObject(oldA, oldj, x, null))
719 U.putObjectVolatile(a, j, x);
720 } while (++b != s);
721 }
722 }
723 U.putOrderedObject
724 (a, (((len - 1) & s) << ASHIFT) + ABASE, task);
725 top = s + 1;
726 } finally {
727 if (!owned)
728 qlock = 0;
729 }
730 if ((p = pool) != null)
731 p.signalWork(this, 1);
732 return true;
733 }
734
735 /**
736 * Takes next task, if one exists, in LIFO order. Call only
737 * by owner in unshared queues.
738 */
739 final ForkJoinTask<?> pop() {
740 ForkJoinTask<?>[] a; ForkJoinTask<?> t; int m;
741 if ((a = array) != null && (m = a.length - 1) >= 0) {
742 for (int s; (s = top - 1) - base >= 0;) {
743 long j = ((m & s) << ASHIFT) + ABASE;
744 if ((t = (ForkJoinTask<?>)U.getObject(a, j)) == null)
745 break;
746 if (U.compareAndSwapObject(a, j, t, null)) {
747 top = s;
748 return t;
749 }
750 }
751 }
752 return null;
753 }
754
755 /**
756 * Takes a task in FIFO order if b is base of queue and a task
757 * can be claimed without contention. Specialized versions
758 * appear in ForkJoinPool methods scan and tryHelpStealer.
759 */
760 final ForkJoinTask<?> pollAt(int b) {
761 ForkJoinTask<?> t; ForkJoinTask<?>[] a;
762 if ((a = array) != null) {
763 int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
764 if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) != null &&
765 base == b &&
766 U.compareAndSwapObject(a, j, t, null)) {
767 base = b + 1;
768 return t;
769 }
770 }
771 return null;
772 }
773
774 /**
775 * Takes next task, if one exists, in FIFO order.
776 */
777 final ForkJoinTask<?> poll() {
778 ForkJoinTask<?>[] a; int b; ForkJoinTask<?> t;
779 while ((b = base) - top < 0 && (a = array) != null) {
780 int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
781 t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
782 if (t != null) {
783 if (base == b &&
784 U.compareAndSwapObject(a, j, t, null)) {
785 base = b + 1;
786 return t;
787 }
788 }
789 else if (base == b) {
790 if (b + 1 == top)
791 break;
792 Thread.yield(); // wait for lagging update (very rare)
793 }
794 }
795 return null;
796 }
797
798 /**
799 * Takes next task, if one exists, in order specified by mode.
800 */
801 final ForkJoinTask<?> nextLocalTask() {
802 return mode == 0 ? pop() : poll();
803 }
804
805 /**
806 * Returns next task, if one exists, in order specified by mode.
807 */
808 final ForkJoinTask<?> peek() {
809 ForkJoinTask<?>[] a = array; int m;
810 if (a == null || (m = a.length - 1) < 0)
811 return null;
812 int i = mode == 0 ? top - 1 : base;
813 int j = ((i & m) << ASHIFT) + ABASE;
814 return (ForkJoinTask<?>)U.getObjectVolatile(a, j);
815 }
816
817 /**
818 * Pops the given task only if it is at the current top.
819 * (A shared version is available only via FJP.tryExternalUnpush)
820 */
821 final boolean tryUnpush(ForkJoinTask<?> t) {
822 ForkJoinTask<?>[] a; int s;
823 if ((a = array) != null && (s = top) != base &&
824 U.compareAndSwapObject
825 (a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) {
826 top = s;
827 return true;
828 }
829 return false;
830 }
831
832 /**
833 * Removes and cancels all known tasks, ignoring any exceptions.
834 */
835 final void cancelAll() {
836 ForkJoinTask.cancelIgnoringExceptions(currentJoin);
837 ForkJoinTask.cancelIgnoringExceptions(currentSteal);
838 for (ForkJoinTask<?> t; (t = poll()) != null; )
839 ForkJoinTask.cancelIgnoringExceptions(t);
840 }
841
842 /**
843 * Computes next value for random probes. Scans don't require
844 * a very high quality generator, but also not a crummy one.
845 * Marsaglia xor-shift is cheap and works well enough. Note:
846 * This is manually inlined in its usages in ForkJoinPool to
847 * avoid writes inside busy scan loops.
848 */
849 final int nextSeed() {
850 int r = seed;
851 r ^= r << 13;
852 r ^= r >>> 17;
853 return seed = r ^= r << 5;
854 }
855
856 /**
857 * Provides a more accurate estimate of size than (top - base)
858 * by ordering reads and checking whether a near-empty queue
859 * has at least one unclaimed task.
860 */
861 final int queueSize() {
862 ForkJoinTask<?>[] a; int k, s, n;
863 return ((n = base - (s = top)) < 0 &&
864 (n != -1 ||
865 ((a = array) != null && (k = a.length) > 0 &&
866 U.getObject
867 (a, (long)((((k - 1) & (s - 1)) << ASHIFT) + ABASE)) != null))) ?
868 -n : 0;
869 }
870
871 // Specialized execution methods
872
873 /**
874 * Pops and runs tasks until empty.
875 */
876 private void popAndExecAll() {
877 // A bit faster than repeated pop calls
878 ForkJoinTask<?>[] a; int m, s; long j; ForkJoinTask<?> t;
879 while ((a = array) != null && (m = a.length - 1) >= 0 &&
880 (s = top - 1) - base >= 0 &&
881 (t = ((ForkJoinTask<?>)
882 U.getObject(a, j = ((m & s) << ASHIFT) + ABASE)))
883 != null) {
884 if (U.compareAndSwapObject(a, j, t, null)) {
885 top = s;
886 t.doExec();
887 }
888 }
889 }
890
891 /**
892 * Polls and runs tasks until empty.
893 */
894 private void pollAndExecAll() {
895 for (ForkJoinTask<?> t; (t = poll()) != null;)
896 t.doExec();
897 }
898
899 /**
900 * If present, removes from queue and executes the given task,
901 * or any other cancelled task. Returns true on any CAS
902 * or consistency-check failure so that the caller can retry.
903 *
904 * @return false if no progress can be made, else true
905 */
906 final boolean tryRemoveAndExec(ForkJoinTask<?> task) {
907 boolean stat = true, removed = false, empty = true;
908 ForkJoinTask<?>[] a; int m, s, b, n;
909 if ((a = array) != null && (m = a.length - 1) >= 0 &&
910 (n = (s = top) - (b = base)) > 0) {
911 for (ForkJoinTask<?> t;;) { // traverse from s to b
912 int j = ((--s & m) << ASHIFT) + ABASE;
913 t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
914 if (t == null) // inconsistent length
915 break;
916 else if (t == task) {
917 if (s + 1 == top) { // pop
918 if (!U.compareAndSwapObject(a, j, task, null))
919 break;
920 top = s;
921 removed = true;
922 }
923 else if (base == b) // replace with proxy
924 removed = U.compareAndSwapObject(a, j, task,
925 new EmptyTask());
926 break;
927 }
928 else if (t.status >= 0)
929 empty = false;
930 else if (s + 1 == top) { // pop and throw away
931 if (U.compareAndSwapObject(a, j, t, null))
932 top = s;
933 break;
934 }
935 if (--n == 0) {
936 if (!empty && base == b)
937 stat = false;
938 break;
939 }
940 }
941 }
942 if (removed)
943 task.doExec();
944 return stat;
945 }
946
947 /**
948 * Polls for and executes the given task or any other task in
949 * its CountedCompleter computation
950 */
951 final boolean pollAndExecCC(ForkJoinTask<?> root) {
952 ForkJoinTask<?>[] a; int b; Object o;
953 outer: while ((b = base) - top < 0 && (a = array) != null) {
954 long j = (((a.length - 1) & b) << ASHIFT) + ABASE;
955 if ((o = U.getObject(a, j)) == null ||
956 !(o instanceof CountedCompleter))
957 break;
958 for (CountedCompleter<?> t = (CountedCompleter<?>)o, r = t;;) {
959 if (r == root) {
960 if (base == b &&
961 U.compareAndSwapObject(a, j, t, null)) {
962 base = b + 1;
963 t.doExec();
964 return true;
965 }
966 else
967 break; // restart
968 }
969 if ((r = r.completer) == null)
970 break outer; // not part of root computation
971 }
972 }
973 return false;
974 }
975
976 /**
977 * Executes a top-level task and any local tasks remaining
978 * after execution.
979 */
980 final void runTask(ForkJoinTask<?> t) {
981 if (t != null) {
982 (currentSteal = t).doExec();
983 currentSteal = null;
984 if (++nsteals < 0) { // spill on overflow
985 ForkJoinPool p;
986 if ((p = pool) != null)
987 p.collectStealCount(this);
988 }
989 if (top != base) { // process remaining local tasks
990 if (mode == 0)
991 popAndExecAll();
992 else
993 pollAndExecAll();
994 }
995 }
996 }
997
998 /**
999 * Executes a non-top-level (stolen) task.
1000 */
1001 final void runSubtask(ForkJoinTask<?> t) {
1002 if (t != null) {
1003 ForkJoinTask<?> ps = currentSteal;
1004 (currentSteal = t).doExec();
1005 currentSteal = ps;
1006 }
1007 }
1008
1009 /**
1010 * Returns true if owned and not known to be blocked.
1011 */
1012 final boolean isApparentlyUnblocked() {
1013 Thread wt; Thread.State s;
1014 return (eventCount >= 0 &&
1015 (wt = owner) != null &&
1016 (s = wt.getState()) != Thread.State.BLOCKED &&
1017 s != Thread.State.WAITING &&
1018 s != Thread.State.TIMED_WAITING);
1019 }
1020
1021 /**
1022 * If this queue is owned and the owner is not already interrupted,
1023 * tries to interrupt and/or unpark it, ignoring exceptions.
1024 */
1025 final void interruptOwner() {
1026 Thread wt, p;
1027 if ((wt = owner) != null && !wt.isInterrupted()) {
1028 try {
1029 wt.interrupt();
1030 } catch (SecurityException ignore) {
1031 }
1032 }
1033 if ((p = parker) != null)
1034 U.unpark(p);
1035 }
1036
1037 // Unsafe mechanics
1038 private static final sun.misc.Unsafe U;
1039 private static final long QLOCK;
1040 private static final int ABASE;
1041 private static final int ASHIFT;
1042 static {
1043 int s;
1044 try {
1045 U = getUnsafe();
1046 Class<?> k = WorkQueue.class;
1047 Class<?> ak = ForkJoinTask[].class;
1048 QLOCK = U.objectFieldOffset
1049 (k.getDeclaredField("qlock"));
1050 ABASE = U.arrayBaseOffset(ak);
1051 s = U.arrayIndexScale(ak);
1052 } catch (Exception e) {
1053 throw new Error(e);
1054 }
1055 if ((s & (s-1)) != 0)
1056 throw new Error("data type scale not a power of two");
1057 ASHIFT = 31 - Integer.numberOfLeadingZeros(s);
1058 }
1059 }
1060
1061 /**
1062 * Per-thread records for threads that submit to pools. Currently
1063 * holds only pseudo-random seed / index that is used to choose
1064 * submission queues in method externalPush. In the future, this may
1065 * also incorporate a means to implement different task rejection
1066 * and resubmission policies.
1067 *
1068 * Seeds for submitters and workers/workQueues work in basically
1069 * the same way but are initialized and updated using slightly
1070 * different mechanics. Both are initialized using the same
1071 * approach as in class ThreadLocal, where successive values are
1072 * unlikely to collide with previous values. Seeds are then
1073 * randomly modified upon collisions using xorshifts, which
1074 * requires a non-zero seed.
1075 */
1076 static final class Submitter {
1077 int seed;
1078 Submitter(int s) { seed = s; }
1079 }
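
/*
 * Sketch of how a submitter's seed is used by externalPush (the method
 * itself appears later in this file): the seed is masked down to an even
 * slot index, consistent with the convention that shared (submission)
 * queues live at even indices in the workQueues array, roughly
 *
 *   WorkQueue q = ws[(ws.length - 1) & submitter.seed & SQMASK];
 *
 * with the seed xorshift-rescrambled upon collision with other submitters.
 */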
1080
1081 /** Property prefix for constructing common pool */
1082 private static final String propPrefix =
1083 "java.util.concurrent.ForkJoinPool.common.";
1084
1085 // static fields (initialized in static initializer below)
1086
1087 /**
1088 * Creates a new ForkJoinWorkerThread. This factory is used unless
1089 * overridden in ForkJoinPool constructors.
1090 */
1091 public static final ForkJoinWorkerThreadFactory
1092 defaultForkJoinWorkerThreadFactory;
1093
1094 /**
1095 * Common (static) pool. Non-null for public use unless a static
1096 * construction exception occurs, but internal usages null-check on use
1097 * to paranoically avoid potential initialization circularities
1098 * as well as to simplify generated code.
1099 */
1100 static final ForkJoinPool commonPool;
1101
1102 /**
1103 * Permission required for callers of methods that may start or
1104 * kill threads.
1105 */
1106 private static final RuntimePermission modifyThreadPermission;
1107
1108 /**
1109 * Per-thread submission bookkeeping. Shared across all pools
1110 * to reduce ThreadLocal pollution and because random motion
1111 * to avoid contention in one pool is likely to hold for others.
1112 * Lazily initialized on first submission (but null-checked
1113 * in other contexts to avoid unnecessary initialization).
1114 */
1115 static final ThreadLocal<Submitter> submitters;
1116
1117 /**
1118 * Common pool parallelism. Must equal commonPool.parallelism.
1119 */
1120 static final int commonPoolParallelism;
1121
1122 /**
1123 * Sequence number for creating workerNamePrefix.
1124 */
1125 private static int poolNumberSequence;
1126
1127 /**
1128 * Returns the next sequence number. We don't expect this to
1129 * ever contend so use simple builtin sync.
1130 */
1131 private static final synchronized int nextPoolId() {
1132 return ++poolNumberSequence;
1133 }
1134
1135 // static constants
1136
1137 /**
1138 * Initial timeout value (in nanoseconds) for the thread
1139 * triggering quiescence to park waiting for new work. On timeout,
1140 * the thread will instead try to shrink the number of
1141 * workers. The value should be large enough to avoid overly
1142 * aggressive shrinkage during most transient stalls (long GCs
1143 * etc).
1144 */
1145 private static final long IDLE_TIMEOUT = 2000L * 1000L * 1000L; // 2sec
1146
1147 /**
1148 * Timeout value when there are more threads than parallelism level
1149 */
1150 private static final long FAST_IDLE_TIMEOUT = 200L * 1000L * 1000L;
1151
1152 /**
1153 * The maximum stolen->joining link depth allowed in method
1154 * tryHelpStealer. Must be a power of two. Depths for legitimate
1155 * chains are unbounded, but we use a fixed constant to avoid
1156 * (otherwise unchecked) cycles and to bound staleness of
1157 * traversal parameters at the expense of sometimes blocking when
1158 * we could be helping.
1159 */
1160 private static final int MAX_HELP = 64;
1161
1162 /**
1163 * Increment for seed generators. See class ThreadLocal for
1164 * explanation.
1165 */
1166 private static final int SEED_INCREMENT = 0x61c88647;
1167
1168 /**
1169 * Bits and masks for control variables
1170 *
1171 * Field ctl is a long packed with:
1172 * AC: Number of active running workers minus target parallelism (16 bits)
1173 * TC: Number of total workers minus target parallelism (16 bits)
1174 * ST: true if pool is terminating (1 bit)
1175 * EC: the wait count of top waiting thread (15 bits)
1176 * ID: poolIndex of top of Treiber stack of waiters (16 bits)
1177 *
1178 * When convenient, we can extract the upper 32 bits of counts and
1179 * the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e =
1180 * (int)ctl. The ec field is never accessed alone, but always
1181 * together with id and st. The offsets of counts by the target
1182 * parallelism and the positionings of fields makes it possible to
1183 * perform the most common checks via sign tests of fields: When
1184 * ac is negative, there are not enough active workers, when tc is
1185 * negative, there are not enough total workers, and when e is
1186 * negative, the pool is terminating. To deal with these possibly
1187 * negative fields, we use casts in and out of "short" and/or
1188 * signed shifts to maintain signedness.
1189 *
1190 * When a thread is queued (inactivated), its eventCount field is
1191 * set negative, which is the only way to tell if a worker is
1192 * prevented from executing tasks, even though it must continue to
1193 * scan for them to avoid queuing races. Note however that
1194 * eventCount updates lag releases so usage requires care.
1195 *
1196 * Field plock is an int packed with:
1197 * SHUTDOWN: true if shutdown is enabled (1 bit)
1198 * SEQ: a sequence lock, with PL_LOCK bit set if locked (30 bits)
1199 * SIGNAL: set when threads may be waiting on the lock (1 bit)
1200 *
1201 * The sequence number enables simple consistency checks:
1202 * Staleness of read-only operations on the workQueues array can
1203 * be checked by comparing plock before vs after the reads.
1204 */
1205
1206 // bit positions/shifts for fields
1207 private static final int AC_SHIFT = 48;
1208 private static final int TC_SHIFT = 32;
1209 private static final int ST_SHIFT = 31;
1210 private static final int EC_SHIFT = 16;
1211
1212 // bounds
1213 private static final int SMASK = 0xffff; // short bits
1214 private static final int MAX_CAP = 0x7fff; // max #workers - 1
1215 private static final int EVENMASK = 0xfffe; // even short bits
1216 private static final int SQMASK = 0x007e; // max 64 (even) slots
1217 private static final int SHORT_SIGN = 1 << 15;
1218 private static final int INT_SIGN = 1 << 31;
1219
1220 // masks
1221 private static final long STOP_BIT = 0x0001L << ST_SHIFT;
1222 private static final long AC_MASK = ((long)SMASK) << AC_SHIFT;
1223 private static final long TC_MASK = ((long)SMASK) << TC_SHIFT;
1224
1225 // units for incrementing and decrementing
1226 private static final long TC_UNIT = 1L << TC_SHIFT;
1227 private static final long AC_UNIT = 1L << AC_SHIFT;
1228
1229 // masks and units for dealing with u = (int)(ctl >>> 32)
1230 private static final int UAC_SHIFT = AC_SHIFT - 32;
1231 private static final int UTC_SHIFT = TC_SHIFT - 32;
1232 private static final int UAC_MASK = SMASK << UAC_SHIFT;
1233 private static final int UTC_MASK = SMASK << UTC_SHIFT;
1234 private static final int UAC_UNIT = 1 << UAC_SHIFT;
1235 private static final int UTC_UNIT = 1 << UTC_SHIFT;
1236
1237 // masks and units for dealing with e = (int)ctl
1238 private static final int E_MASK = 0x7fffffff; // no STOP_BIT
1239 private static final int E_SEQ = 1 << EC_SHIFT;
1240
1241 // plock bits
1242 private static final int SHUTDOWN = 1 << 31;
1243 private static final int PL_LOCK = 2;
1244 private static final int PL_SIGNAL = 1;
1245 private static final int PL_SPINS = 1 << 8;
1246
1247 // access mode for WorkQueue
1248 static final int LIFO_QUEUE = 0;
1249 static final int FIFO_QUEUE = 1;
1250 static final int SHARED_QUEUE = -1;
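
/*
 * Worked illustration (a sketch, consistent with the count offsets
 * described above; not a quotation of the constructor): with target
 * parallelism p, both AC and TC start offset at -p, so the usual sign
 * tests ("not enough active/total workers") hold until p workers exist:
 *
 *   long np = (long)(-p);
 *   long ctl0 = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
 *   // (int)(ctl0 >> AC_SHIFT) == -p; (short)(ctl0 >>> TC_SHIFT) == -p
 *   int u = (int)(ctl0 >>> 32);  // packed counts; u < 0 exactly when ac < 0
 *   int e = (int)ctl0;           // queue state; negative only when terminating
 */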
1251
1252 // Instance fields
1253
1254 /*
1255 * Field layout order in this class tends to matter more than one
1256 * would like. Runtime layout order is only loosely related to
1257 * declaration order and may differ across JVMs, but the following
1258 * empirically works OK on current JVMs.
1259 */
1260 volatile long stealCount; // collects worker counts
1261 volatile long ctl; // main pool control
1262 final int parallelism; // parallelism level
1263 final int localMode; // per-worker scheduling mode
1264 volatile int indexSeed; // worker/submitter index seed
1265 volatile int plock; // shutdown status and seqLock
1266 WorkQueue[] workQueues; // main registry
1267 final ForkJoinWorkerThreadFactory factory; // factory for new workers
1268 final Thread.UncaughtExceptionHandler ueh; // per-worker UEH
1269 final String workerNamePrefix; // to create worker name string
1270
1271 /*
1272 * Acquires the plock lock to protect worker array and related
1273 * updates. This method is called only if an initial CAS on plock
1274 * fails. This acts as a spinLock for normal cases, but falls back
1275 * to builtin monitor to block when (rarely) needed. This would be
1276 * a terrible idea for a highly contended lock, but works fine as
1277 * a more conservative alternative to a pure spinlock. See
1278 * internal ConcurrentHashMap documentation for further
1279 * explanation of nearly the same construction.
1280 */
1281 private int acquirePlock() {
1282 int spins = PL_SPINS, r = 0, ps, nps;
1283 for (;;) {
1284 if (((ps = plock) & PL_LOCK) == 0 &&
1285 U.compareAndSwapInt(this, PLOCK, ps, nps = ps + PL_LOCK))
1286 return nps;
1287 else if (r == 0)
1288 r = ThreadLocalRandom.current().nextInt(); // randomize spins
1289 else if (spins >= 0) {
1290 r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
1291 if (r >= 0)
1292 --spins;
1293 }
1294 else if (U.compareAndSwapInt(this, PLOCK, ps, ps | PL_SIGNAL)) {
1295 synchronized (this) {
1296 if ((plock & PL_SIGNAL) != 0) {
1297 try {
1298 wait();
1299 } catch (InterruptedException ie) {
1300 try {
1301 Thread.currentThread().interrupt();
1302 } catch (SecurityException ignore) {
1303 }
1304 }
1305 }
1306 else
1307 notifyAll();
1308 }
1309 }
1310 }
1311 }
1312
1313 /**
1314 * Unlocks and signals any thread waiting for plock. Called only
1315 * when CAS of seq value for unlock fails.
1316 */
1317 private void releasePlock(int ps) {
1318 plock = ps;
1319 synchronized (this) { notifyAll(); }
1320 }
1321
1322 // Registering and deregistering workers
1323
1324 /**
1325 * Callback from ForkJoinWorkerThread constructor to establish its
1326 * poolIndex and record its WorkQueue. To avoid scanning bias due
1327 * to packing entries in front of the workQueues array, we treat
1328 * the array as a simple power-of-two hash table using per-thread
1329 * seed as hash, expanding as needed.
1330 *
1331 * @param w the worker's queue
1332 */
1333 final void registerWorker(WorkQueue w) {
1334 int s, ps; // generate a rarely colliding candidate index seed
1335 do {} while (!U.compareAndSwapInt(this, INDEXSEED,
1336 s = indexSeed, s += SEED_INCREMENT) ||
1337 s == 0); // skip 0
1338 if (((ps = plock) & PL_LOCK) != 0 ||
1339 !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
1340 ps = acquirePlock();
1341 int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
1342 try {
1343 WorkQueue[] ws;
1344 if (w != null && (ws = workQueues) != null) {
1345 w.seed = s;
1346 int n = ws.length, m = n - 1;
1347 int r = (s << 1) | 1; // use odd-numbered indices
1348 if (ws[r &= m] != null) { // collision
1349 int probes = 0; // step by approx half size
1350 int step = (n <= 4) ? 2 : ((n >>> 1) & EVENMASK) + 2;
1351 while (ws[r = (r + step) & m] != null) {
1352 if (++probes >= n) {
1353 workQueues = ws = Arrays.copyOf(ws, n <<= 1);
1354 m = n - 1;
1355 probes = 0;
1356 }
1357 }
1358 }
1359 w.eventCount = w.poolIndex = r; // establish before recording
1360 ws[r] = w;
1361 }
1362 } finally {
1363 if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
1364 releasePlock(nps);
1365 }
1366 }
1367
1368 /**
1369 * Final callback from terminating worker, as well as upon failure
1370 * to construct or start a worker. Removes record of worker from
1371 * array, and adjusts counts. If pool is shutting down, tries to
1372 * complete termination.
1373 *
1374 * @param wt the worker thread or null if construction failed
1375 * @param ex the exception causing failure, or null if none
1376 */
1377 final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) {
1378 WorkQueue w = null;
1379 if (wt != null && (w = wt.workQueue) != null) {
1380 int ps;
1381 collectStealCount(w);
1382 w.qlock = -1; // ensure set
1383 if (((ps = plock) & PL_LOCK) != 0 ||
1384 !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
1385 ps = acquirePlock();
1386 int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
1387 try {
1388 int idx = w.poolIndex;
1389 WorkQueue[] ws = workQueues;
1390 if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w)
1391 ws[idx] = null;
1392 } finally {
1393 if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
1394 releasePlock(nps);
1395 }
1396 }
1397
1398 long c; // adjust ctl counts
1399 do {} while (!U.compareAndSwapLong
1400 (this, CTL, c = ctl, (((c - AC_UNIT) & AC_MASK) |
1401 ((c - TC_UNIT) & TC_MASK) |
1402 (c & ~(AC_MASK|TC_MASK)))));
1403
1404 if (!tryTerminate(false, false) && w != null) {
1405 w.cancelAll(); // cancel remaining tasks
1406 if (w.array != null) // suppress signal if never ran
1407 signalWork(null, 1); // wake up or create replacement
1408 if (ex == null) // help clean refs on way out
1409 ForkJoinTask.helpExpungeStaleExceptions();
1410 }
1411
1412 if (ex != null) // rethrow
1413 ForkJoinTask.rethrow(ex);
1414 }
1415
1416 /**
1417 * Collects worker steal count into total. Called on termination
1418 * and upon int overflow of local count. (There is a possible race
1419 * in the latter case vs any caller of getStealCount, which can
1420 * make its results less accurate than usual.)
1421 */
1422 final void collectStealCount(WorkQueue w) {
1423 if (w != null) {
1424 long sc;
1425 int ns = w.nsteals;
1426 w.nsteals = 0; // handle overflow
1427 long steals = (ns >= 0) ? ns : 1L + (long)(Integer.MAX_VALUE);
1428 do {} while (!U.compareAndSwapLong(this, STEALCOUNT,
1429 sc = stealCount, sc + steals));
1430 }
1431 }
1432
1433 // Submissions
1434
1435 /**
1436 * Unless shutting down, adds the given task to a submission queue
1437 * at submitter's current queue index (modulo submission
1438 * range). Only the most common path is directly handled in this
1439 * method. All others are relayed to fullExternalPush.
1440 *
1441 * @param task the task. Caller must ensure non-null.
1442 */
1443 final void externalPush(ForkJoinTask<?> task) {
1444 WorkQueue[] ws; WorkQueue q; Submitter z; int m; ForkJoinTask<?>[] a;
1445 if ((z = submitters.get()) != null && plock > 0 &&
1446 (ws = workQueues) != null && (m = (ws.length - 1)) >= 0 &&
1447 (q = ws[m & z.seed & SQMASK]) != null &&
1448 U.compareAndSwapInt(q, QLOCK, 0, 1)) { // lock
1449 int s = q.top, n;
1450 if ((a = q.array) != null && a.length > (n = s + 1 - q.base)) {
1451 U.putObject(a, (long)(((a.length - 1) & s) << ASHIFT) + ABASE,
1452 task);
1453 q.top = s + 1; // push on to deque
1454 q.qlock = 0;
1455 if (n <= 1)
1456 signalWork(q, 1);
1457 return;
1458 }
1459 q.qlock = 0;
1460 }
1461 fullExternalPush(task);
1462 }
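/*
 * Editorial sketch, not part of this class: externalPush (and its slow
 * path fullExternalPush) handles submissions arriving from non-worker
 * threads via the public submission methods.  A minimal external caller
 * might look like the following, where SampleTask is a hypothetical
 * ForkJoinTask subclass:
 *
 *   ForkJoinPool pool = new ForkJoinPool();
 *   pool.execute(new SampleTask());  // relays to externalPush
 *   pool.submit(new SampleTask());   // likewise, returning the task
 */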
1463
1464 /**
1465 * Full version of externalPush. This method is called, among
1466 * other times, upon the first submission of the first task to the
1467 * pool, so must perform secondary initialization: creating
1468 * workQueue array and setting plock to a valid value. It also
1469 * detects first submission by an external thread by looking up
1470 * its ThreadLocal, and creates a new shared queue if the one at
1471 * index is empty or contended.  The lock bodies must be
1472 * exception-free (so no try/finally) so we optimistically
1473 * allocate new queues/arrays outside the locks and throw them
1474 * away if (very rarely) not needed. Note that the plock seq value
1475 * can eventually wrap around zero, but if so harmlessly fails to
1476 * reinitialize.
1477 */
1478 private void fullExternalPush(ForkJoinTask<?> task) {
1479 for (Submitter z = null;;) {
1480 WorkQueue[] ws; WorkQueue q; int ps, m, r, s;
1481 if ((ps = plock) < 0)
1482 throw new RejectedExecutionException();
1483 else if ((ws = workQueues) == null || (m = ws.length - 1) < 0) {
1484 int n = parallelism - 1; n |= n >>> 1; n |= n >>> 2;
1485 n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
1486 WorkQueue[] nws = new WorkQueue[(n + 1) << 1]; // power of two
1487 if ((ps & PL_LOCK) != 0 ||
1488 !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
1489 ps = acquirePlock();
1490 if ((ws = workQueues) == null)
1491 workQueues = nws;
1492 int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
1493 if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
1494 releasePlock(nps);
1495 }
1496 else if (z == null && (z = submitters.get()) == null) {
1497 if (U.compareAndSwapInt(this, INDEXSEED,
1498 s = indexSeed, s += SEED_INCREMENT) &&
1499 s != 0) // skip 0
1500 submitters.set(z = new Submitter(s));
1501 }
1502 else {
1503 int k = (r = z.seed) & m & SQMASK;
1504 if ((q = ws[k]) == null && (ps & PL_LOCK) == 0) {
1505 (q = new WorkQueue(this, null, SHARED_QUEUE)).poolIndex = k;
1506 if (((ps = plock) & PL_LOCK) != 0 ||
1507 !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
1508 ps = acquirePlock();
1509 WorkQueue w = null;
1510 if ((ws = workQueues) != null && k < ws.length &&
1511 (w = ws[k]) == null)
1512 ws[k] = q;
1513 else
1514 q = w;
1515 int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
1516 if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
1517 releasePlock(nps);
1518 }
1519 if (q != null && q.qlock == 0 && q.fullPush(task, false))
1520 return;
1521 r ^= r << 13; // same xorshift as WorkQueues
1522 r ^= r >>> 17;
1523 z.seed = r ^= r << 5; // move to a different index
1524 }
1525 }
1526 }
1527
1528 // Maintaining ctl counts
1529
1530 /**
1531 * Increments active count; mainly called upon return from blocking.
1532 */
1533 final void incrementActiveCount() {
1534 long c;
1535 do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT));
1536 }
1537
1538 /**
1539 * Tries to create (at most one) or activate (possibly several)
1540 * workers if too few are active. On contention failure, continues
1541 * until at least one worker is signalled or the given queue is
1542 * empty or all workers are active.
1543 *
1544 * @param q if non-null, the queue holding tasks to be signalled
1545 * @param signals the target number of signals.
1546 */
1547 final void signalWork(WorkQueue q, int signals) {
1548 long c; int e, u, i; WorkQueue[] ws; WorkQueue w; Thread p;
1549 while ((u = (int)((c = ctl) >>> 32)) < 0) {
1550 if ((e = (int)c) > 0) {
1551 if ((ws = workQueues) != null && ws.length > (i = e & SMASK) &&
1552 (w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) {
1553 long nc = (((long)(w.nextWait & E_MASK)) |
1554 ((long)(u + UAC_UNIT) << 32));
1555 if (U.compareAndSwapLong(this, CTL, c, nc)) {
1556 w.eventCount = (e + E_SEQ) & E_MASK;
1557 if ((p = w.parker) != null)
1558 U.unpark(p);
1559 if (--signals <= 0)
1560 break;
1561 }
1562 else
1563 signals = 1;
1564 if ((q != null && q.queueSize() == 0))
1565 break;
1566 }
1567 else
1568 break;
1569 }
1570 else if (e == 0 && (u & SHORT_SIGN) != 0) {
1571 long nc = (long)(((u + UTC_UNIT) & UTC_MASK) |
1572 ((u + UAC_UNIT) & UAC_MASK)) << 32;
1573 if (U.compareAndSwapLong(this, CTL, c, nc)) {
1574 ForkJoinWorkerThread wt = null;
1575 Throwable ex = null;
1576 boolean started = false;
1577 try {
1578 ForkJoinWorkerThreadFactory fac;
1579 if ((fac = factory) != null &&
1580 (wt = fac.newThread(this)) != null) {
1581 wt.start();
1582 started = true;
1583 }
1584 } catch (Throwable rex) {
1585 ex = rex;
1586 }
1587 if (!started)
1588 deregisterWorker(wt, ex); // adjust counts on failure
1589 break;
1590 }
1591 }
1592 else
1593 break;
1594 }
1595 }
1596
1597 // Scanning for tasks
1598
1599 /**
1600 * Top-level runloop for workers, called by ForkJoinWorkerThread.run.
1601 */
1602 final void runWorker(WorkQueue w) {
1603 // initialize queue array in this thread
1604 w.array = new ForkJoinTask<?>[WorkQueue.INITIAL_QUEUE_CAPACITY];
1605 do { w.runTask(scan(w)); } while (w.qlock >= 0);
1606 }
1607
1608 /**
1609 * Scans for and, if found, returns one task, else possibly
1610 * inactivates the worker. This method operates on single reads of
1611 * volatile state and is designed to be re-invoked continuously,
1612 * in part because it returns upon detecting inconsistencies,
1613 * contention, or state changes that indicate possible success on
1614 * re-invocation.
1615 *
1616 * The scan searches for tasks across a random permutation of
1617 * queues (starting at a random index and stepping by a random
1618 * relative prime, checking each at least once). The scan
1619 * terminates upon either finding a non-empty queue, or completing
1620 * the sweep. If the worker is not inactivated, it takes and
1621 * returns a task from this queue. Otherwise, if not activated, it
1622 * signals workers (that may include itself) and returns so caller
1623 * can retry. Also returns for retry if the worker array may have
1624 * changed during an empty scan. On failure to find a task, we
1625 * take one of the following actions, after which the caller will
1626 * retry calling this method unless terminated.
1627 *
1628 * * If pool is terminating, terminate the worker.
1629 *
1630 * * If not already enqueued, try to inactivate and enqueue the
1631 * worker on wait queue. Or, if inactivating has caused the pool
1632 * to be quiescent, relay to idleAwaitWork to check for
1633 * termination and possibly shrink pool.
1634 *
1635 * * If already enqueued and none of the above apply, possibly
1636 * (with 1/2 probability) park awaiting signal, else linger to
1637 * help scan and signal.
1638 *
1639 * @param w the worker (via its WorkQueue)
1640 * @return a task or null if none found
1641 */
1642 private final ForkJoinTask<?> scan(WorkQueue w) {
1643 WorkQueue[] ws; WorkQueue q; // first update random seed
1644 int r = w.seed; r ^= r << 13; r ^= r >>> 17; w.seed = r ^= r << 5;
1645 int ps = plock, m; // volatile read order matters
1646 if ((ws = workQueues) != null && (m = ws.length - 1) > 0) {
1647 int ec = w.eventCount; // ec is negative if inactive
1648 int step = (r >>> 16) | 1; // relatively prime
1649 for (int j = (m + 1) << 2; ; --j, r += step) {
1650 ForkJoinTask<?> t; ForkJoinTask<?>[] a; int b, n;
1651 if ((q = ws[r & m]) != null && (b = q.base) - q.top < 0 &&
1652 (a = q.array) != null) { // probably nonempty
1653 int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
1654 t = (ForkJoinTask<?>)U.getObjectVolatile(a, i);
1655 if (q.base == b && ec >= 0 && t != null &&
1656 U.compareAndSwapObject(a, i, t, null)) {
1657 if ((n = q.top - (q.base = b + 1)) > 0)
1658 signalWork(q, n);
1659 return t; // taken
1660 }
1661 if (j < m || (ec < 0 && (ec = w.eventCount) < 0)) {
1662 if ((n = q.queueSize() - 1) > 0)
1663 signalWork(q, n);
1664 break; // let caller retry after signal
1665 }
1666 }
1667 else if (j < 0) { // end of scan
1668 long c = ctl; int e;
1669 if (plock != ps) // incomplete sweep
1670 break;
1671 if ((e = (int)c) < 0) // pool is terminating
1672 w.qlock = -1;
1673 else if (ec >= 0) { // try to enqueue/inactivate
1674 long nc = ((long)ec |
1675 ((c - AC_UNIT) & (AC_MASK|TC_MASK)));
1676 w.nextWait = e;
1677 w.eventCount = ec | INT_SIGN; // mark as inactive
1678 if (ctl != c ||
1679 !U.compareAndSwapLong(this, CTL, c, nc))
1680 w.eventCount = ec; // unmark on CAS failure
1681 else if ((int)(c >> AC_SHIFT) == 1 - parallelism)
1682 idleAwaitWork(w, nc, c); // quiescent
1683 }
1684 else if (w.seed >= 0 && w.eventCount < 0) {
1685 Thread wt = Thread.currentThread();
1686 Thread.interrupted(); // clear status
1687 U.putObject(wt, PARKBLOCKER, this);
1688 w.parker = wt; // emulate LockSupport.park
1689 if (w.eventCount < 0) // recheck
1690 U.park(false, 0L);
1691 w.parker = null;
1692 U.putObject(wt, PARKBLOCKER, null);
1693 }
1694 break;
1695 }
1696 }
1697 }
1698 return null;
1699 }
1700
1701 /**
1702 * If inactivating worker w has caused the pool to become
1703 * quiescent, checks for pool termination, and, so long as this is
1704 * not the only worker, waits for an event for up to a given
1705 * duration. On timeout, if ctl has not changed, terminates the
1706 * worker, which will in turn wake up another worker to possibly
1707 * repeat this process.
1708 *
1709 * @param w the calling worker
1710 * @param currentCtl the ctl value triggering possible quiescence
1711 * @param prevCtl the ctl value to restore if thread is terminated
1712 */
1713 private void idleAwaitWork(WorkQueue w, long currentCtl, long prevCtl) {
1714 if (w.eventCount < 0 &&
1715 (this == commonPool || !tryTerminate(false, false)) &&
1716 (int)prevCtl != 0) {
1717 int dc = -(short)(currentCtl >>> TC_SHIFT);
1718 long parkTime = dc < 0 ? FAST_IDLE_TIMEOUT: (dc + 1) * IDLE_TIMEOUT;
1719 long deadline = System.nanoTime() + parkTime - 100000L; // 1ms slop
1720 Thread wt = Thread.currentThread();
1721 while (ctl == currentCtl) {
1722 Thread.interrupted(); // timed variant of version in scan()
1723 U.putObject(wt, PARKBLOCKER, this);
1724 w.parker = wt;
1725 if (ctl == currentCtl)
1726 U.park(false, parkTime);
1727 w.parker = null;
1728 U.putObject(wt, PARKBLOCKER, null);
1729 if (ctl != currentCtl)
1730 break;
1731 if (deadline - System.nanoTime() <= 0L &&
1732 U.compareAndSwapLong(this, CTL, currentCtl, prevCtl)) {
1733 w.eventCount = (w.eventCount + E_SEQ) | E_MASK;
1734 w.qlock = -1; // shrink
1735 break;
1736 }
1737 }
1738 }
1739 }
1740
1741 /**
1742 * Scans through queues looking for work while joining a task;
1743 * if any are present, signals.
1744 *
1745 * @param task to return early if done
1746 * @param origin an index to start scan
1747 */
1748 final int helpSignal(ForkJoinTask<?> task, int origin) {
1749 WorkQueue[] ws; WorkQueue q; int m, n, s;
1750 if (task != null && (ws = workQueues) != null &&
1751 (m = ws.length - 1) >= 0) {
1752 for (int i = 0; i <= m; ++i) {
1753 if ((s = task.status) < 0)
1754 return s;
1755 if ((q = ws[(i + origin) & m]) != null &&
1756 (n = q.queueSize()) > 0) {
1757 signalWork(q, n);
1758 if ((int)(ctl >> AC_SHIFT) >= 0)
1759 break;
1760 }
1761 }
1762 }
1763 return 0;
1764 }
1765
1766 /**
1767 * Tries to locate and execute tasks for a stealer of the given
1768 * task, or in turn one of its stealers. Traces currentSteal ->
1769 * currentJoin links looking for a thread working on a descendant
1770 * of the given task and with a non-empty queue to steal back and
1771 * execute tasks from. The first call to this method upon a
1772 * waiting join will often entail scanning/search (which is OK
1773 * because the joiner has nothing better to do), but this method
1774 * leaves hints in workers to speed up subsequent calls. The
1775 * implementation is very branchy to cope with potential
1776 * inconsistencies or loops encountering chains that are stale,
1777 * unknown, or so long that they are likely cyclic.
1778 *
1779 * @param joiner the joining worker
1780 * @param task the task to join
1781 * @return 0 if no progress can be made, negative if task
1782 * known complete, else positive
1783 */
1784 private int tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) {
1785 int stat = 0, steps = 0; // bound to avoid cycles
1786 if (joiner != null && task != null) { // hoist null checks
1787 restart: for (;;) {
1788 ForkJoinTask<?> subtask = task; // current target
1789 for (WorkQueue j = joiner, v;;) { // v is stealer of subtask
1790 WorkQueue[] ws; int m, s, h;
1791 if ((s = task.status) < 0) {
1792 stat = s;
1793 break restart;
1794 }
1795 if ((ws = workQueues) == null || (m = ws.length - 1) <= 0)
1796 break restart; // shutting down
1797 if ((v = ws[h = (j.stealHint | 1) & m]) == null ||
1798 v.currentSteal != subtask) {
1799 for (int origin = h;;) { // find stealer
1800 if (((h = (h + 2) & m) & 15) == 1 &&
1801 (subtask.status < 0 || j.currentJoin != subtask))
1802 continue restart; // occasional staleness check
1803 if ((v = ws[h]) != null &&
1804 v.currentSteal == subtask) {
1805 j.stealHint = h; // save hint
1806 break;
1807 }
1808 if (h == origin)
1809 break restart; // cannot find stealer
1810 }
1811 }
1812 for (;;) { // help stealer or descend to its stealer
1813 ForkJoinTask[] a; int b;
1814 if (subtask.status < 0) // surround probes with
1815 continue restart; // consistency checks
1816 if ((b = v.base) - v.top < 0 && (a = v.array) != null) {
1817 int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
1818 ForkJoinTask<?> t =
1819 (ForkJoinTask<?>)U.getObjectVolatile(a, i);
1820 if (subtask.status < 0 || j.currentJoin != subtask ||
1821 v.currentSteal != subtask)
1822 continue restart; // stale
1823 stat = 1; // apparent progress
1824 if (t != null && v.base == b &&
1825 U.compareAndSwapObject(a, i, t, null)) {
1826 v.base = b + 1; // help stealer
1827 joiner.runSubtask(t);
1828 }
1829 else if (v.base == b && ++steps == MAX_HELP)
1830 break restart; // v apparently stalled
1831 }
1832 else { // empty -- try to descend
1833 ForkJoinTask<?> next = v.currentJoin;
1834 if (subtask.status < 0 || j.currentJoin != subtask ||
1835 v.currentSteal != subtask)
1836 continue restart; // stale
1837 else if (next == null || ++steps == MAX_HELP)
1838 break restart; // dead-end or maybe cyclic
1839 else {
1840 subtask = next;
1841 j = v;
1842 break;
1843 }
1844 }
1845 }
1846 }
1847 }
1848 }
1849 return stat;
1850 }
1851
1852 /**
1853 * Analog of tryHelpStealer for CountedCompleters. Tries to steal
1854 * and run tasks within the target's computation.
1855 *
1856 * @param task the task to join
1857 * @param mode if shared, exit upon completing any task
1858 * if all workers are active
1859 *
1860 */
1861 private int helpComplete(ForkJoinTask<?> task, int mode) {
1862 WorkQueue[] ws; WorkQueue q; int m, n, s;
1863 if (task != null && (ws = workQueues) != null &&
1864 (m = ws.length - 1) >= 0) {
1865 for (int j = 1, origin = j;;) {
1866 if ((s = task.status) < 0)
1867 return s;
1868 if ((q = ws[j & m]) != null && q.pollAndExecCC(task)) {
1869 origin = j;
1870 if (mode == SHARED_QUEUE && (int)(ctl >> AC_SHIFT) >= 0)
1871 break;
1872 }
1873 else if ((j = (j + 2) & m) == origin)
1874 break;
1875 }
1876 }
1877 return 0;
1878 }
1879
1880 /**
1881 * Tries to decrement active count (sometimes implicitly) and
1882 * possibly release or create a compensating worker in preparation
1883 * for blocking. Fails on contention or termination. Otherwise,
1884 * adds a new thread if no idle workers are available and pool
1885 * may become starved.
1886 */
1887 final boolean tryCompensate() {
1888 int pc = parallelism, e, u, i, tc; long c;
1889 WorkQueue[] ws; WorkQueue w; Thread p;
1890 if ((e = (int)(c = ctl)) >= 0 && (ws = workQueues) != null) {
1891 if (e != 0 && (i = e & SMASK) < ws.length &&
1892 (w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) {
1893 long nc = ((long)(w.nextWait & E_MASK) |
1894 (c & (AC_MASK|TC_MASK)));
1895 if (U.compareAndSwapLong(this, CTL, c, nc)) {
1896 w.eventCount = (e + E_SEQ) & E_MASK;
1897 if ((p = w.parker) != null)
1898 U.unpark(p);
1899 return true; // replace with idle worker
1900 }
1901 }
1902 else if ((short)((u = (int)(c >>> 32)) >>> UTC_SHIFT) >= 0 &&
1903 (u >> UAC_SHIFT) + pc > 1) {
1904 long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK);
1905 if (U.compareAndSwapLong(this, CTL, c, nc))
1906 return true; // no compensation
1907 }
1908 else if ((tc = u + pc) < MAX_CAP) {
1909 long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK);
1910 if (U.compareAndSwapLong(this, CTL, c, nc)) {
1911 Throwable ex = null;
1912 ForkJoinWorkerThread wt = null;
1913 try {
1914 ForkJoinWorkerThreadFactory fac;
1915 if ((fac = factory) != null &&
1916 (wt = fac.newThread(this)) != null) {
1917 wt.start();
1918 return true;
1919 }
1920 } catch (Throwable rex) {
1921 ex = rex;
1922 }
1923 deregisterWorker(wt, ex); // adjust counts etc
1924 }
1925 }
1926 }
1927 return false;
1928 }
1929
1930 /**
1931 * Helps and/or blocks until the given task is done.
1932 *
1933 * @param joiner the joining worker
1934 * @param task the task
1935 * @return task status on exit
1936 */
1937 final int awaitJoin(WorkQueue joiner, ForkJoinTask<?> task) {
1938 int s = 0;
1939 if (joiner != null && task != null && (s = task.status) >= 0) {
1940 ForkJoinTask<?> prevJoin = joiner.currentJoin;
1941 joiner.currentJoin = task;
1942 do {} while ((s = task.status) >= 0 &&
1943 joiner.queueSize() > 0 &&
1944 joiner.tryRemoveAndExec(task)); // process local tasks
1945 if (s >= 0 && (s = task.status) >= 0 &&
1946 (s = helpSignal(task, joiner.poolIndex)) >= 0 &&
1947 (task instanceof CountedCompleter))
1948 s = helpComplete(task, LIFO_QUEUE);
1949 while (s >= 0 && (s = task.status) >= 0) {
1950 if ((joiner.queueSize() > 0 || // try helping
1951 (s = tryHelpStealer(joiner, task)) == 0) &&
1952 (s = task.status) >= 0 && tryCompensate()) {
1953 if (task.trySetSignal() && (s = task.status) >= 0) {
1954 synchronized (task) {
1955 if (task.status >= 0) {
1956 try { // see ForkJoinTask
1957 task.wait(); // for explanation
1958 } catch (InterruptedException ie) {
1959 }
1960 }
1961 else
1962 task.notifyAll();
1963 }
1964 }
1965 long c; // re-activate
1966 do {} while (!U.compareAndSwapLong
1967 (this, CTL, c = ctl, c + AC_UNIT));
1968 }
1969 }
1970 joiner.currentJoin = prevJoin;
1971 }
1972 return s;
1973 }
1974
1975 /**
1976 * Stripped-down variant of awaitJoin used by timed joins. Tries
1977 * to help join only while there is continuous progress. (Caller
1978 * will then enter a timed wait.)
1979 *
1980 * @param joiner the joining worker
1981 * @param task the task
1982 */
1983 final void helpJoinOnce(WorkQueue joiner, ForkJoinTask<?> task) {
1984 int s;
1985 if (joiner != null && task != null && (s = task.status) >= 0) {
1986 ForkJoinTask<?> prevJoin = joiner.currentJoin;
1987 joiner.currentJoin = task;
1988 do {} while ((s = task.status) >= 0 &&
1989 joiner.queueSize() > 0 &&
1990 joiner.tryRemoveAndExec(task));
1991 if (s >= 0 && (s = task.status) >= 0 &&
1992 (s = helpSignal(task, joiner.poolIndex)) >= 0 &&
1993 (task instanceof CountedCompleter))
1994 s = helpComplete(task, LIFO_QUEUE);
1995 if (s >= 0 && joiner.queueSize() == 0) {
1996 do {} while (task.status >= 0 &&
1997 tryHelpStealer(joiner, task) > 0);
1998 }
1999 joiner.currentJoin = prevJoin;
2000 }
2001 }
2002
2003 /**
2004 * Returns a (probably) non-empty steal queue, if one is found
2005 * during a random, then cyclic scan, else null. This method must
2006 * be retried by caller if, by the time it tries to use the queue,
2007 * it is empty.
2008 * @param r a (random) seed for scanning
2009 */
2010 private WorkQueue findNonEmptyStealQueue(int r) {
2011 int step = (r >>> 16) | 1;
2012 for (WorkQueue[] ws;;) {
2013 int ps = plock, m;
2014 if ((ws = workQueues) == null || (m = ws.length - 1) < 1)
2015 return null;
2016 for (int j = (m + 1) << 2; ; r += step) {
2017 WorkQueue q = ws[((r << 1) | 1) & m];
2018 if (q != null && q.queueSize() > 0)
2019 return q;
2020 else if (--j < 0) {
2021 if (plock == ps)
2022 return null;
2023 break;
2024 }
2025 }
2026 }
2027 }
2028
2029 /**
2030 * Runs tasks until {@code isQuiescent()}. We piggyback on
2031 * active count ctl maintenance, but rather than blocking
2032 * when tasks cannot be found, we rescan until all others cannot
2033 * find tasks either.
2034 */
2035 final void helpQuiescePool(WorkQueue w) {
2036 for (boolean active = true;;) {
2037 ForkJoinTask<?> localTask; // exhaust local queue
2038 while ((localTask = w.nextLocalTask()) != null)
2039 localTask.doExec();
2040 // Similar to loop in scan(), but ignoring submissions
2041 WorkQueue q = findNonEmptyStealQueue(w.nextSeed());
2042 if (q != null) {
2043 ForkJoinTask<?> t; int b;
2044 if (!active) { // re-establish active count
2045 long c;
2046 active = true;
2047 do {} while (!U.compareAndSwapLong
2048 (this, CTL, c = ctl, c + AC_UNIT));
2049 }
2050 if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null)
2051 w.runSubtask(t);
2052 }
2053 else {
2054 long c;
2055 if (active) { // decrement active count without queuing
2056 active = false;
2057 do {} while (!U.compareAndSwapLong
2058 (this, CTL, c = ctl, c -= AC_UNIT));
2059 }
2060 else
2061 c = ctl; // re-increment on exit
2062 if ((int)(c >> AC_SHIFT) + parallelism == 0) {
2063 do {} while (!U.compareAndSwapLong
2064 (this, CTL, c = ctl, c + AC_UNIT));
2065 break;
2066 }
2067 }
2068 }
2069 }
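/*
 * Editorial note: worker code normally reaches helpQuiescePool through
 * the public static ForkJoinTask.helpQuiesce() entry point (assuming
 * that wrapper is present in this package's ForkJoinTask), for example
 * from within a task body:
 *
 *   subtask1.fork();
 *   subtask2.fork();
 *   ForkJoinTask.helpQuiesce();  // run tasks until the pool is quiescent
 */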
2070
2071 /**
2072 * Gets and removes a local or stolen task for the given worker.
2073 *
2074 * @return a task, if available
2075 */
2076 final ForkJoinTask<?> nextTaskFor(WorkQueue w) {
2077 for (ForkJoinTask<?> t;;) {
2078 WorkQueue q; int b;
2079 if ((t = w.nextLocalTask()) != null)
2080 return t;
2081 if ((q = findNonEmptyStealQueue(w.nextSeed())) == null)
2082 return null;
2083 if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null)
2084 return t;
2085 }
2086 }
2087
2088 /**
2089 * Returns a cheap heuristic guide for task partitioning when
2090 * programmers, frameworks, tools, or languages have little or no
2091 * idea about task granularity. In essence, by offering this
2092 * method, we ask users only about tradeoffs in overhead vs
2093 * expected throughput and its variance, rather than how finely to
2094 * partition tasks.
2095 *
2096 * In a steady state strict (tree-structured) computation, each
2097 * thread makes available for stealing enough tasks for other
2098 * threads to remain active. Inductively, if all threads play by
2099 * the same rules, each thread should make available only a
2100 * constant number of tasks.
2101 *
2102 * The minimum useful constant is just 1. But using a value of 1
2103 * would require immediate replenishment upon each steal to
2104 * maintain enough tasks, which is infeasible. Further,
2105 * partitionings/granularities of offered tasks should minimize
2106 * steal rates, which in general means that threads nearer the top
2107 * of computation tree should generate more than those nearer the
2108 * bottom. In perfect steady state, each thread is at
2109 * approximately the same level of computation tree. However,
2110 * producing extra tasks amortizes the uncertainty of progress and
2111 * diffusion assumptions.
2112 *
2113 * So, users will want to use values larger, but not much larger
2114 * than 1 to both smooth over transient shortages and hedge
2115 * against uneven progress, as traded off against the cost of
2116 * extra task overhead. We leave the user to pick a threshold
2117 * value to compare with the results of this call to guide
2118 * decisions, but recommend values such as 3.
2119 *
2120 * When all threads are active, it is on average OK to estimate
2121 * surplus strictly locally. In steady-state, if one thread is
2122 * maintaining, say, 2 surplus tasks, then so are others. So we can
2123 * just use estimated queue length. However, this strategy alone
2124 * leads to serious mis-estimates in some non-steady-state
2125 * conditions (ramp-up, ramp-down, other stalls). We can detect
2126 * many of these by further considering the number of "idle"
2127 * threads, which are known to have zero queued tasks, and
2128 * compensate by a factor of (#idle/#active) threads.
2129 *
2130 * Note: The approximation of #busy workers as #active workers is
2131 * not very good under current signalling scheme, and should be
2132 * improved.
2133 */
2134 static int getSurplusQueuedTaskCount() {
2135 Thread t; ForkJoinWorkerThread wt; ForkJoinPool pool; WorkQueue q;
2136 if (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)) {
2137 int b = (q = (wt = (ForkJoinWorkerThread)t).workQueue).base;
2138 int p = (pool = wt.pool).parallelism;
2139 int a = (int)(pool.ctl >> AC_SHIFT) + p;
2140 return q.top - b - (a > (p >>>= 1) ? 0 :
2141 a > (p >>>= 1) ? 1 :
2142 a > (p >>>= 1) ? 2 :
2143 a > (p >>>= 1) ? 4 :
2144 8);
2145 }
2146 return 0;
2147 }
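/*
 * Editorial sketch of the usage described above, via the public
 * ForkJoinTask.getSurplusQueuedTaskCount() wrapper (assumed to delegate
 * here).  MyTask, SEQUENTIAL_CUTOFF, size(), split(), and
 * computeDirectly() are hypothetical names; the threshold of 3 follows
 * the recommendation above:
 *
 *   protected Long compute() {             // e.g. in a RecursiveTask<Long>
 *       if (size() <= SEQUENTIAL_CUTOFF ||
 *           ForkJoinTask.getSurplusQueuedTaskCount() >= 3)
 *           return computeDirectly();      // enough work already queued
 *       MyTask right = split();            // otherwise keep partitioning
 *       right.fork();
 *       return compute() + right.join();
 *   }
 */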
2148
2149 // Termination
2150
2151 /**
2152 * Possibly initiates and/or completes termination. The caller
2153 * triggering termination runs three passes through workQueues:
2154 * (0) Setting termination status, followed by wakeups of queued
2155 * workers; (1) cancelling all tasks; (2) interrupting lagging
2156 * threads (likely in external tasks, but possibly also blocked in
2157 * joins). Each pass repeats previous steps because of potential
2158 * lagging thread creation.
2159 *
2160 * @param now if true, unconditionally terminate, else only
2161 * if no work and no active workers
2162 * @param enable if true, enable shutdown when next possible
2163 * @return true if now terminating or terminated
2164 */
2165 private boolean tryTerminate(boolean now, boolean enable) {
2166 if (this == commonPool) // cannot shut down
2167 return false;
2168 for (long c;;) {
2169 if (((c = ctl) & STOP_BIT) != 0) { // already terminating
2170 if ((short)(c >>> TC_SHIFT) == -parallelism) {
2171 synchronized (this) {
2172 notifyAll(); // signal when 0 workers
2173 }
2174 }
2175 return true;
2176 }
2177 if (plock >= 0) { // not yet enabled
2178 int ps;
2179 if (!enable)
2180 return false;
2181 if (((ps = plock) & PL_LOCK) != 0 ||
2182 !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
2183 ps = acquirePlock();
2184 int nps = SHUTDOWN;
2185 if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
2186 releasePlock(nps);
2187 }
2188 if (!now) { // check if idle & no tasks
2189 if ((int)(c >> AC_SHIFT) != -parallelism ||
2190 hasQueuedSubmissions())
2191 return false;
2192 // Check for unqueued inactive workers. One pass suffices.
2193 WorkQueue[] ws = workQueues; WorkQueue w;
2194 if (ws != null) {
2195 for (int i = 1; i < ws.length; i += 2) {
2196 if ((w = ws[i]) != null && w.eventCount >= 0)
2197 return false;
2198 }
2199 }
2200 }
2201 if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) {
2202 for (int pass = 0; pass < 3; ++pass) {
2203 WorkQueue[] ws = workQueues;
2204 if (ws != null) {
2205 WorkQueue w;
2206 int n = ws.length;
2207 for (int i = 0; i < n; ++i) {
2208 if ((w = ws[i]) != null) {
2209 w.qlock = -1;
2210 if (pass > 0) {
2211 w.cancelAll();
2212 if (pass > 1)
2213 w.interruptOwner();
2214 }
2215 }
2216 }
2217 // Wake up workers parked on event queue
2218 int i, e; long cc; Thread p;
2219 while ((e = (int)(cc = ctl) & E_MASK) != 0 &&
2220 (i = e & SMASK) < n &&
2221 (w = ws[i]) != null) {
2222 long nc = ((long)(w.nextWait & E_MASK) |
2223 ((cc + AC_UNIT) & AC_MASK) |
2224 (cc & (TC_MASK|STOP_BIT)));
2225 if (w.eventCount == (e | INT_SIGN) &&
2226 U.compareAndSwapLong(this, CTL, cc, nc)) {
2227 w.eventCount = (e + E_SEQ) & E_MASK;
2228 w.qlock = -1;
2229 if ((p = w.parker) != null)
2230 U.unpark(p);
2231 }
2232 }
2233 }
2234 }
2235 }
2236 }
2237 }
2238
2239 // external operations on common pool
2240
2241 /**
2242 * Returns common pool queue for a thread that has submitted at
2243 * least one task.
2244 */
2245 static WorkQueue commonSubmitterQueue() {
2246 ForkJoinPool p; WorkQueue[] ws; int m; Submitter z;
2247 return ((z = submitters.get()) != null &&
2248 (p = commonPool) != null &&
2249 (ws = p.workQueues) != null &&
2250 (m = ws.length - 1) >= 0) ?
2251 ws[m & z.seed & SQMASK] : null;
2252 }
2253
2254 /**
2255 * Tries to pop the given task from submitter's queue in common pool.
2256 */
2257 static boolean tryExternalUnpush(ForkJoinTask<?> t) {
2258 ForkJoinPool p; WorkQueue[] ws; WorkQueue q; Submitter z;
2259 ForkJoinTask<?>[] a; int m, s; long j;
2260 if ((z = submitters.get()) != null &&
2261 (p = commonPool) != null &&
2262 (ws = p.workQueues) != null &&
2263 (m = ws.length - 1) >= 0 &&
2264 (q = ws[m & z.seed & SQMASK]) != null &&
2265 (s = q.top) != q.base &&
2266 (a = q.array) != null &&
2267 U.getObjectVolatile
2268 (a, j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE) == t &&
2269 U.compareAndSwapInt(q, QLOCK, 0, 1)) {
2270 if (q.array == a && q.top == s && // recheck
2271 U.compareAndSwapObject(a, j, t, null)) {
2272 q.top = s - 1;
2273 q.qlock = 0;
2274 return true;
2275 }
2276 q.qlock = 0;
2277 }
2278 return false;
2279 }
2280
2281 /**
2282 * Tries to pop and run local tasks within the same computation
2283 * as the given root. On failure, tries to help complete from
2284 * other queues via helpComplete.
2285 */
2286 private void externalHelpComplete(WorkQueue q, ForkJoinTask<?> root) {
2287 ForkJoinTask<?>[] a; int m;
2288 if (q != null && (a = q.array) != null && (m = (a.length - 1)) >= 0 &&
2289 root != null && root.status >= 0) {
2290 for (;;) {
2291 int s; Object o; CountedCompleter<?> task = null;
2292 if ((s = q.top) - q.base > 0) {
2293 long j = ((m & (s - 1)) << ASHIFT) + ABASE;
2294 if ((o = U.getObject(a, j)) != null &&
2295 (o instanceof CountedCompleter)) {
2296 CountedCompleter<?> t = (CountedCompleter<?>)o, r = t;
2297 do {
2298 if (r == root) {
2299 if (U.compareAndSwapInt(q, QLOCK, 0, 1)) {
2300 if (q.array == a && q.top == s &&
2301 U.compareAndSwapObject(a, j, t, null)) {
2302 q.top = s - 1;
2303 task = t;
2304 }
2305 q.qlock = 0;
2306 }
2307 break;
2308 }
2309 } while ((r = r.completer) != null);
2310 }
2311 }
2312 if (task != null)
2313 task.doExec();
2314 if (root.status < 0 || (int)(ctl >> AC_SHIFT) >= 0)
2315 break;
2316 if (task == null) {
2317 if (helpSignal(root, q.poolIndex) >= 0)
2318 helpComplete(root, SHARED_QUEUE);
2319 break;
2320 }
2321 }
2322 }
2323 }
2324
2325 /**
2326 * Tries to help execute or signal availability of the given task
2327 * from submitter's queue in common pool.
2328 */
2329 static void externalHelpJoin(ForkJoinTask<?> t) {
2330 // Some hard-to-avoid overlap with tryExternalUnpush
2331 ForkJoinPool p; WorkQueue[] ws; WorkQueue q, w; Submitter z;
2332 ForkJoinTask<?>[] a; int m, s, n; long j;
2333 if (t != null && t.status >= 0 &&
2334 (z = submitters.get()) != null &&
2335 (p = commonPool) != null &&
2336 (ws = p.workQueues) != null &&
2337 (m = ws.length - 1) >= 0 &&
2338 (q = ws[m & z.seed & SQMASK]) != null &&
2339 (a = q.array) != null) {
2340 if ((s = q.top) != q.base &&
2341 U.getObjectVolatile
2342 (a, j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE) == t &&
2343 U.compareAndSwapInt(q, QLOCK, 0, 1)) {
2344 if (q.array == a && q.top == s &&
2345 U.compareAndSwapObject(a, j, t, null)) {
2346 q.top = s - 1;
2347 q.qlock = 0;
2348 t.doExec();
2349 }
2350 else
2351 q.qlock = 0;
2352 }
2353 if (t.status >= 0) {
2354 if (t instanceof CountedCompleter)
2355 p.externalHelpComplete(q, t);
2356 else
2357 p.helpSignal(t, q.poolIndex);
2358 }
2359 }
2360 }
2361
2362 /**
2363 * Restricted version of helpQuiescePool for external callers
2364 */
2365 static void externalHelpQuiescePool() {
2366 ForkJoinPool p; ForkJoinTask<?> t; WorkQueue q; int b;
2367 int r = ThreadLocalRandom.current().nextInt();
2368 if ((p = commonPool) != null &&
2369 (q = p.findNonEmptyStealQueue(r)) != null &&
2370 (b = q.base) - q.top < 0 &&
2371 (t = q.pollAt(b)) != null)
2372 t.doExec();
2373 }
2374
2375 // Exported methods
2376
2377 // Constructors
2378
2379 /**
2380 * Creates a {@code ForkJoinPool} with parallelism equal to {@link
2381 * java.lang.Runtime#availableProcessors}, using the {@linkplain
2382 * #defaultForkJoinWorkerThreadFactory default thread factory},
2383 * no UncaughtExceptionHandler, and non-async LIFO processing mode.
2384 *
2385 * @throws SecurityException if a security manager exists and
2386 * the caller is not permitted to modify threads
2387 * because it does not hold {@link
2388 * java.lang.RuntimePermission}{@code ("modifyThread")}
2389 */
2390 public ForkJoinPool() {
2391 this(Runtime.getRuntime().availableProcessors(),
2392 defaultForkJoinWorkerThreadFactory, null, false);
2393 }
2394
2395 /**
2396 * Creates a {@code ForkJoinPool} with the indicated parallelism
2397 * level, the {@linkplain
2398 * #defaultForkJoinWorkerThreadFactory default thread factory},
2399 * no UncaughtExceptionHandler, and non-async LIFO processing mode.
2400 *
2401 * @param parallelism the parallelism level
2402 * @throws IllegalArgumentException if parallelism less than or
2403 * equal to zero, or greater than implementation limit
2404 * @throws SecurityException if a security manager exists and
2405 * the caller is not permitted to modify threads
2406 * because it does not hold {@link
2407 * java.lang.RuntimePermission}{@code ("modifyThread")}
2408 */
2409 public ForkJoinPool(int parallelism) {
2410 this(parallelism, defaultForkJoinWorkerThreadFactory, null, false);
2411 }
2412
2413 /**
2414 * Creates a {@code ForkJoinPool} with the given parameters.
2415 *
2416 * @param parallelism the parallelism level. For default value,
2417 * use {@link java.lang.Runtime#availableProcessors}.
2418 * @param factory the factory for creating new threads. For default value,
2419 * use {@link #defaultForkJoinWorkerThreadFactory}.
2420 * @param handler the handler for internal worker threads that
2421 * terminate due to unrecoverable errors encountered while executing
2422 * tasks. For default value, use {@code null}.
2423 * @param asyncMode if true,
2424 * establishes local first-in-first-out scheduling mode for forked
2425 * tasks that are never joined. This mode may be more appropriate
2426 * than default locally stack-based mode in applications in which
2427 * worker threads only process event-style asynchronous tasks.
2428 * For default value, use {@code false}.
2429 * @throws IllegalArgumentException if parallelism less than or
2430 * equal to zero, or greater than implementation limit
2431 * @throws NullPointerException if the factory is null
2432 * @throws SecurityException if a security manager exists and
2433 * the caller is not permitted to modify threads
2434 * because it does not hold {@link
2435 * java.lang.RuntimePermission}{@code ("modifyThread")}
2436 */
2437 public ForkJoinPool(int parallelism,
2438 ForkJoinWorkerThreadFactory factory,
2439 Thread.UncaughtExceptionHandler handler,
2440 boolean asyncMode) {
2441 checkPermission();
2442 if (factory == null)
2443 throw new NullPointerException();
2444 if (parallelism <= 0 || parallelism > MAX_CAP)
2445 throw new IllegalArgumentException();
2446 this.parallelism = parallelism;
2447 this.factory = factory;
2448 this.ueh = handler;
2449 this.localMode = asyncMode ? FIFO_QUEUE : LIFO_QUEUE;
2450 long np = (long)(-parallelism); // offset ctl counts
2451 this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
2452 int pn = nextPoolId();
2453 StringBuilder sb = new StringBuilder("ForkJoinPool-");
2454 sb.append(Integer.toString(pn));
2455 sb.append("-worker-");
2456 this.workerNamePrefix = sb.toString();
2457 }
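/*
 * Editorial example: constructing a pool in asyncMode for event-style
 * tasks that are never joined, with the default factory and no handler.
 * The parallelism level of 4 is arbitrary:
 *
 *   ForkJoinPool asyncPool = new ForkJoinPool
 *       (4, ForkJoinPool.defaultForkJoinWorkerThreadFactory, null, true);
 */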
2458
2459 /**
2460 * Constructor for common pool, suitable only for static initialization.
2461 * Basically the same as above, but uses smallest possible initial footprint.
2462 */
2463 ForkJoinPool(int parallelism, long ctl,
2464 ForkJoinWorkerThreadFactory factory,
2465 Thread.UncaughtExceptionHandler handler) {
2466 this.parallelism = parallelism;
2467 this.ctl = ctl;
2468 this.factory = factory;
2469 this.ueh = handler;
2470 this.localMode = LIFO_QUEUE;
2471 this.workerNamePrefix = "ForkJoinPool.commonPool-worker-";
2472 }
2473
2474 /**
2475 * Returns the common pool instance.
2476 *
2477 * @return the common pool instance
2478 */
2479 public static ForkJoinPool commonPool() {
2480 return commonPool; // cannot be null (if so, a static init error)
2481 }
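/*
 * Editorial example: the common pool needs no lifecycle management;
 * clients may simply submit to it, e.g.:
 *
 *   ForkJoinPool.commonPool().submit(someForkJoinTask);
 *
 * where someForkJoinTask is a hypothetical ForkJoinTask instance.
 */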
2482
2483 // Execution methods
2484
2485 /**
2486 * Performs the given task, returning its result upon completion.
2487 * If the computation encounters an unchecked Exception or Error,
2488 * it is rethrown as the outcome of this invocation. Rethrown
2489 * exceptions behave in the same way as regular exceptions, but,
2490 * when possible, contain stack traces (as displayed for example
2491 * using {@code ex.printStackTrace()}) of both the current thread
2492 * as well as the thread actually encountering the exception;
2493 * minimally only the latter.
2494 *
2495 * @param task the task
2496 * @return the task's result
2497 * @throws NullPointerException if the task is null
2498 * @throws RejectedExecutionException if the task cannot be
2499 * scheduled for execution
2500 */
2501 public <T> T invoke(ForkJoinTask<T> task) {
2502 if (task == null)
2503 throw new NullPointerException();
2504 externalPush(task);
2505 return task.join();
2506 }
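/*
 * Editorial sketch: invoke submits the task and then joins it in the
 * caller.  Assuming ForkJoinTask.adapt(Callable) is available in this
 * package (as in java.util.concurrent), a blocking external call could
 * be written as:
 *
 *   Integer answer = pool.invoke(ForkJoinTask.adapt(
 *       new Callable<Integer>() {
 *           public Integer call() { return 6 * 7; }
 *       }));
 */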
2507
2508 /**
2509 * Arranges for (asynchronous) execution of the given task.
2510 *
2511 * @param task the task
2512 * @throws NullPointerException if the task is null
2513 * @throws RejectedExecutionException if the task cannot be
2514 * scheduled for execution
2515 */
2516 public void execute(ForkJoinTask<?> task) {
2517 if (task == null)
2518 throw new NullPointerException();
2519 externalPush(task);
2520 }
2521
2522 // AbstractExecutorService methods
2523
2524 /**
2525 * @throws NullPointerException if the task is null
2526 * @throws RejectedExecutionException if the task cannot be
2527 * scheduled for execution
2528 */
2529 public void execute(Runnable task) {
2530 if (task == null)
2531 throw new NullPointerException();
2532 ForkJoinTask<?> job;
2533 if (task instanceof ForkJoinTask<?>) // avoid re-wrap
2534 job = (ForkJoinTask<?>) task;
2535 else
2536 job = new ForkJoinTask.AdaptedRunnableAction(task);
2537 externalPush(job);
2538 }
2539
2540 /**
2541 * Submits a ForkJoinTask for execution.
2542 *
2543 * @param task the task to submit
2544 * @return the task
2545 * @throws NullPointerException if the task is null
2546 * @throws RejectedExecutionException if the task cannot be
2547 * scheduled for execution
2548 */
2549 public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) {
2550 if (task == null)
2551 throw new NullPointerException();
2552 externalPush(task);
2553 return task;
2554 }
2555
2556 /**
2557 * @throws NullPointerException if the task is null
2558 * @throws RejectedExecutionException if the task cannot be
2559 * scheduled for execution
2560 */
2561 public <T> ForkJoinTask<T> submit(Callable<T> task) {
2562 ForkJoinTask<T> job = new ForkJoinTask.AdaptedCallable<T>(task);
2563 externalPush(job);
2564 return job;
2565 }
2566
2567 /**
2568 * @throws NullPointerException if the task is null
2569 * @throws RejectedExecutionException if the task cannot be
2570 * scheduled for execution
2571 */
2572 public <T> ForkJoinTask<T> submit(Runnable task, T result) {
2573 ForkJoinTask<T> job = new ForkJoinTask.AdaptedRunnable<T>(task, result);
2574 externalPush(job);
2575 return job;
2576 }
2577
2578 /**
2579 * @throws NullPointerException if the task is null
2580 * @throws RejectedExecutionException if the task cannot be
2581 * scheduled for execution
2582 */
2583 public ForkJoinTask<?> submit(Runnable task) {
2584 if (task == null)
2585 throw new NullPointerException();
2586 ForkJoinTask<?> job;
2587 if (task instanceof ForkJoinTask<?>) // avoid re-wrap
2588 job = (ForkJoinTask<?>) task;
2589 else
2590 job = new ForkJoinTask.AdaptedRunnableAction(task);
2591 externalPush(job);
2592 return job;
2593 }
2594
2595 /**
2596 * @throws NullPointerException {@inheritDoc}
2597 * @throws RejectedExecutionException {@inheritDoc}
2598 */
2599 public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) {
2600 // In previous versions of this class, this method constructed
2601 // a task to run ForkJoinTask.invokeAll, but now external
2602 // invocation of multiple tasks is at least as efficient.
2603 List<ForkJoinTask<T>> fs = new ArrayList<ForkJoinTask<T>>(tasks.size());
2604 // Workaround needed because method wasn't declared with
2605 // wildcards in return type but should have been.
2606 @SuppressWarnings({"unchecked", "rawtypes"})
2607 List<Future<T>> futures = (List<Future<T>>) (List) fs;
2608
2609 boolean done = false;
2610 try {
2611 for (Callable<T> t : tasks) {
2612 ForkJoinTask<T> f = new ForkJoinTask.AdaptedCallable<T>(t);
2613 externalPush(f);
2614 fs.add(f);
2615 }
2616 for (ForkJoinTask<T> f : fs)
2617 f.quietlyJoin();
2618 done = true;
2619 return futures;
2620 } finally {
2621 if (!done)
2622 for (ForkJoinTask<T> f : fs)
2623 f.cancel(false);
2624 }
2625 }
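/*
 * Editorial sketch: each Callable is adapted, pushed, and quietly joined
 * before the futures are returned.  Assuming a collection of
 * Callable<String> named calls (exception handling elided):
 *
 *   List<Future<String>> results = pool.invokeAll(calls);
 *   for (Future<String> f : results)
 *       System.out.println(f.get());
 */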
2626
2627 /**
2628 * Returns the factory used for constructing new workers.
2629 *
2630 * @return the factory used for constructing new workers
2631 */
2632 public ForkJoinWorkerThreadFactory getFactory() {
2633 return factory;
2634 }
2635
2636 /**
2637 * Returns the handler for internal worker threads that terminate
2638 * due to unrecoverable errors encountered while executing tasks.
2639 *
2640 * @return the handler, or {@code null} if none
2641 */
2642 public Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() {
2643 return ueh;
2644 }
2645
2646 /**
2647 * Returns the targeted parallelism level of this pool.
2648 *
2649 * @return the targeted parallelism level of this pool
2650 */
2651 public int getParallelism() {
2652 return parallelism;
2653 }
2654
2655 /**
2656 * Returns the targeted parallelism level of the common pool.
2657 *
2658 * @return the targeted parallelism level of the common pool
2659 */
2660 public static int getCommonPoolParallelism() {
2661 return commonPoolParallelism;
2662 }
2663
2664 /**
2665 * Returns the number of worker threads that have started but not
2666 * yet terminated. The result returned by this method may differ
2667 * from {@link #getParallelism} when threads are created to
2668 * maintain parallelism when others are cooperatively blocked.
2669 *
2670 * @return the number of worker threads
2671 */
2672 public int getPoolSize() {
2673 return parallelism + (short)(ctl >>> TC_SHIFT);
2674 }
2675
2676 /**
2677 * Returns {@code true} if this pool uses local first-in-first-out
2678 * scheduling mode for forked tasks that are never joined.
2679 *
2680 * @return {@code true} if this pool uses async mode
2681 */
2682 public boolean getAsyncMode() {
2683 return localMode != 0;
2684 }
2685
2686 /**
2687 * Returns an estimate of the number of worker threads that are
2688 * not blocked waiting to join tasks or for other managed
2689 * synchronization. This method may overestimate the
2690 * number of running threads.
2691 *
2692 * @return the number of worker threads
2693 */
2694 public int getRunningThreadCount() {
2695 int rc = 0;
2696 WorkQueue[] ws; WorkQueue w;
2697 if ((ws = workQueues) != null) {
2698 for (int i = 1; i < ws.length; i += 2) {
2699 if ((w = ws[i]) != null && w.isApparentlyUnblocked())
2700 ++rc;
2701 }
2702 }
2703 return rc;
2704 }
2705
2706 /**
2707 * Returns an estimate of the number of threads that are currently
2708 * stealing or executing tasks. This method may overestimate the
2709 * number of active threads.
2710 *
2711 * @return the number of active threads
2712 */
2713 public int getActiveThreadCount() {
2714 int r = parallelism + (int)(ctl >> AC_SHIFT);
2715 return (r <= 0) ? 0 : r; // suppress momentarily negative values
2716 }
2717
2718 /**
2719 * Returns {@code true} if all worker threads are currently idle.
2720 * An idle worker is one that cannot obtain a task to execute
2721 * because none are available to steal from other threads, and
2722 * there are no pending submissions to the pool. This method is
2723 * conservative; it might not return {@code true} immediately upon
2724 * idleness of all threads, but will eventually become true if
2725 * threads remain inactive.
2726 *
2727 * @return {@code true} if all threads are currently idle
2728 */
2729 public boolean isQuiescent() {
2730 return (int)(ctl >> AC_SHIFT) + parallelism == 0;
2731 }
2732
2733 /**
2734 * Returns an estimate of the total number of tasks stolen from
2735 * one thread's work queue by another. The reported value
2736 * underestimates the actual total number of steals when the pool
2737 * is not quiescent. This value may be useful for monitoring and
2738 * tuning fork/join programs: in general, steal counts should be
2739 * high enough to keep threads busy, but low enough to avoid
2740 * overhead and contention across threads.
2741 *
2742 * @return the number of steals
2743 */
2744 public long getStealCount() {
2745 long count = stealCount;
2746 WorkQueue[] ws; WorkQueue w;
2747 if ((ws = workQueues) != null) {
2748 for (int i = 1; i < ws.length; i += 2) {
2749 if ((w = ws[i]) != null)
2750 count += w.nsteals;
2751 }
2752 }
2753 return count;
2754 }
2755
2756 /**
2757 * Returns an estimate of the total number of tasks currently held
2758 * in queues by worker threads (but not including tasks submitted
2759 * to the pool that have not begun executing). This value is only
2760 * an approximation, obtained by iterating across all threads in
2761 * the pool. This method may be useful for tuning task
2762 * granularities.
2763 *
2764 * @return the number of queued tasks
2765 */
2766 public long getQueuedTaskCount() {
2767 long count = 0;
2768 WorkQueue[] ws; WorkQueue w;
2769 if ((ws = workQueues) != null) {
2770 for (int i = 1; i < ws.length; i += 2) {
2771 if ((w = ws[i]) != null)
2772 count += w.queueSize();
2773 }
2774 }
2775 return count;
2776 }
2777
2778 /**
2779 * Returns an estimate of the number of tasks submitted to this
2780 * pool that have not yet begun executing. This method may take
2781 * time proportional to the number of submissions.
2782 *
2783 * @return the number of queued submissions
2784 */
2785 public int getQueuedSubmissionCount() {
2786 int count = 0;
2787 WorkQueue[] ws; WorkQueue w;
2788 if ((ws = workQueues) != null) {
2789 for (int i = 0; i < ws.length; i += 2) {
2790 if ((w = ws[i]) != null)
2791 count += w.queueSize();
2792 }
2793 }
2794 return count;
2795 }
2796
2797 /**
2798 * Returns {@code true} if there are any tasks submitted to this
2799 * pool that have not yet begun executing.
2800 *
2801 * @return {@code true} if there are any queued submissions
2802 */
2803 public boolean hasQueuedSubmissions() {
2804 WorkQueue[] ws; WorkQueue w;
2805 if ((ws = workQueues) != null) {
2806 for (int i = 0; i < ws.length; i += 2) {
2807 if ((w = ws[i]) != null && w.queueSize() != 0)
2808 return true;
2809 }
2810 }
2811 return false;
2812 }
2813
2814 /**
2815 * Removes and returns the next unexecuted submission if one is
2816 * available. This method may be useful in extensions to this
2817 * class that re-assign work in systems with multiple pools.
2818 *
2819 * @return the next submission, or {@code null} if none
2820 */
2821 protected ForkJoinTask<?> pollSubmission() {
2822 WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
2823 if ((ws = workQueues) != null) {
2824 for (int i = 0; i < ws.length; i += 2) {
2825 if ((w = ws[i]) != null && (t = w.poll()) != null)
2826 return t;
2827 }
2828 }
2829 return null;
2830 }
2831
2832 /**
2833 * Removes all available unexecuted submitted and forked tasks
2834 * from scheduling queues and adds them to the given collection,
2835 * without altering their execution status. These may include
2836 * artificially generated or wrapped tasks. This method is
2837 * designed to be invoked only when the pool is known to be
2838 * quiescent. Invocations at other times may not remove all
2839 * tasks. A failure encountered while attempting to add elements
2840 * to collection {@code c} may result in elements being in
2841 * neither, either or both collections when the associated
2842 * exception is thrown. The behavior of this operation is
2843 * undefined if the specified collection is modified while the
2844 * operation is in progress.
2845 *
2846 * @param c the collection to transfer elements into
2847 * @return the number of elements transferred
2848 */
2849 protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) {
2850 int count = 0;
2851 WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
2852 if ((ws = workQueues) != null) {
2853 for (int i = 0; i < ws.length; ++i) {
2854 if ((w = ws[i]) != null) {
2855 while ((t = w.poll()) != null) {
2856 c.add(t);
2857 ++count;
2858 }
2859 }
2860 }
2861 }
2862 return count;
2863 }
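/*
 * Editorial sketch: drainTasksTo is protected, so it is used from a
 * subclass.  A hypothetical extension might move unexecuted tasks to
 * another pool once this one is quiescent:
 *
 *   class DrainablePool extends ForkJoinPool {
 *       int migrateTo(ForkJoinPool other) {
 *           List<ForkJoinTask<?>> drained = new ArrayList<ForkJoinTask<?>>();
 *           int n = drainTasksTo(drained);
 *           for (ForkJoinTask<?> t : drained)
 *               other.execute(t);
 *           return n;
 *       }
 *   }
 */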
2864
2865 /**
2866 * Returns a string identifying this pool, as well as its state,
2867 * including indications of run state, parallelism level, and
2868 * worker and task counts.
2869 *
2870 * @return a string identifying this pool, as well as its state
2871 */
2872 public String toString() {
2873 // Use a single pass through workQueues to collect counts
2874 long qt = 0L, qs = 0L; int rc = 0;
2875 long st = stealCount;
2876 long c = ctl;
2877 WorkQueue[] ws; WorkQueue w;
2878 if ((ws = workQueues) != null) {
2879 for (int i = 0; i < ws.length; ++i) {
2880 if ((w = ws[i]) != null) {
2881 int size = w.queueSize();
2882 if ((i & 1) == 0)
2883 qs += size;
2884 else {
2885 qt += size;
2886 st += w.nsteals;
2887 if (w.isApparentlyUnblocked())
2888 ++rc;
2889 }
2890 }
2891 }
2892 }
2893 int pc = parallelism;
2894 int tc = pc + (short)(c >>> TC_SHIFT);
2895 int ac = pc + (int)(c >> AC_SHIFT);
2896 if (ac < 0) // ignore transient negative
2897 ac = 0;
2898 String level;
2899 if ((c & STOP_BIT) != 0)
2900 level = (tc == 0) ? "Terminated" : "Terminating";
2901 else
2902 level = plock < 0 ? "Shutting down" : "Running";
2903 return super.toString() +
2904 "[" + level +
2905 ", parallelism = " + pc +
2906 ", size = " + tc +
2907 ", active = " + ac +
2908 ", running = " + rc +
2909 ", steals = " + st +
2910 ", tasks = " + qt +
2911 ", submissions = " + qs +
2912 "]";
2913 }
2914
2915 /**
2916 * Possibly initiates an orderly shutdown in which previously
2917 * submitted tasks are executed, but no new tasks will be
2918 * accepted. Invocation has no effect on execution state if this
2919 * is the {@link #commonPool}, and no additional effect if
2920 * already shut down. Tasks that are in the process of being
2921 * submitted concurrently during the course of this method may or
2922 * may not be rejected.
2923 *
2924 * @throws SecurityException if a security manager exists and
2925 * the caller is not permitted to modify threads
2926 * because it does not hold {@link
2927 * java.lang.RuntimePermission}{@code ("modifyThread")}
2928 */
2929 public void shutdown() {
2930 checkPermission();
2931 tryTerminate(false, true);
2932 }
2933
2934 /**
2935 * Possibly attempts to cancel and/or stop all tasks, and reject
2936 * all subsequently submitted tasks. Invocation has no effect on
2937 * execution state if this is the {@link #commonPool}, and no
2938 * additional effect if already shut down. Otherwise, tasks that
2939 * are in the process of being submitted or executed concurrently
2940 * during the course of this method may or may not be
2941 * rejected. This method cancels both existing and unexecuted
2942 * tasks, in order to permit termination in the presence of task
2943 * dependencies. So the method always returns an empty list
2944 * (unlike the case for some other Executors).
2945 *
2946 * @return an empty list
2947 * @throws SecurityException if a security manager exists and
2948 * the caller is not permitted to modify threads
2949 * because it does not hold {@link
2950 * java.lang.RuntimePermission}{@code ("modifyThread")}
2951 */
2952 public List<Runnable> shutdownNow() {
2953 checkPermission();
2954 tryTerminate(true, true);
2955 return Collections.emptyList();
2956 }
2957
2958 /**
2959 * Returns {@code true} if all tasks have completed following shut down.
2960 *
2961 * @return {@code true} if all tasks have completed following shut down
2962 */
2963 public boolean isTerminated() {
2964 long c = ctl;
2965 return ((c & STOP_BIT) != 0L &&
2966 (short)(c >>> TC_SHIFT) == -parallelism);
2967 }
2968
2969 /**
2970 * Returns {@code true} if the process of termination has
2971 * commenced but not yet completed. This method may be useful for
2972 * debugging. A return of {@code true} reported a sufficient
2973 * period after shutdown may indicate that submitted tasks have
2974 * ignored or suppressed interruption, or are waiting for IO,
2975 * causing this executor not to properly terminate. (See the
2976 * advisory notes for class {@link ForkJoinTask} stating that
2977 * tasks should not normally entail blocking operations. But if
2978 * they do, they must abort them on interrupt.)
2979 *
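* <p>As a purely illustrative debugging sketch (assuming a pool
* referenced by a local variable {@code pool}):
* <pre> {@code
* if (pool.isTerminating() && !pool.isTerminated())
*   System.err.println("still terminating; tasks may be blocked");
* }</pre>
*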
2980 * @return {@code true} if terminating but not yet terminated
2981 */
2982 public boolean isTerminating() {
2983 long c = ctl;
2984 return ((c & STOP_BIT) != 0L &&
2985 (short)(c >>> TC_SHIFT) != -parallelism);
2986 }
2987
2988 /**
2989 * Returns {@code true} if this pool has been shut down.
2990 *
2991 * @return {@code true} if this pool has been shut down
2992 */
2993 public boolean isShutdown() {
2994 return plock < 0;
2995 }
2996
2997 /**
2998 * Blocks until all tasks have completed execution after a
2999 * shutdown request, or the timeout occurs, or the current thread
3000 * is interrupted, whichever happens first. Note that the {@link
3001 * #commonPool()} never terminates until program shutdown, so
3002 * this method will always time out.
3003 *
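* <p>As a purely illustrative usage sketch (assuming a pool referenced
* by a local variable {@code pool}), a typical shutdown sequence is:
* <pre> {@code
* pool.shutdown();
* if (!pool.awaitTermination(60, TimeUnit.SECONDS)) // may throw InterruptedException
*   pool.shutdownNow(); // cancel any lingering tasks
* }</pre>
*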
3004 * @param timeout the maximum time to wait
3005 * @param unit the time unit of the timeout argument
3006 * @return {@code true} if this executor terminated and
3007 * {@code false} if the timeout elapsed before termination
3008 * @throws InterruptedException if interrupted while waiting
3009 */
3010 public boolean awaitTermination(long timeout, TimeUnit unit)
3011 throws InterruptedException {
3012 long nanos = unit.toNanos(timeout);
3013 if (isTerminated())
3014 return true;
3015 long startTime = System.nanoTime();
3016 boolean terminated = false;
3017 synchronized (this) {
3018 for (long waitTime = nanos, millis = 0L;;) {
3019 if ((terminated = isTerminated()) ||
3020 waitTime <= 0L ||
3021 (millis = unit.toMillis(waitTime)) <= 0L)
3022 break;
3023 wait(millis);
3024 waitTime = nanos - (System.nanoTime() - startTime);
3025 }
3026 }
3027 return terminated;
3028 }
3029
3030 /**
3031 * Interface for extending managed parallelism for tasks running
3032 * in {@link ForkJoinPool}s.
3033 *
3034 * <p>A {@code ManagedBlocker} provides two methods. Method
3035 * {@code isReleasable} must return {@code true} if blocking is
3036 * not necessary. Method {@code block} blocks the current thread
3037 * if necessary (perhaps internally invoking {@code isReleasable}
3038 * before actually blocking). These actions are performed by any
3039 * thread invoking {@link ForkJoinPool#managedBlock}. The
3040 * unusual methods in this API accommodate synchronizers that may,
3041 * but don't usually, block for long periods. Similarly, they
3042 * allow more efficient internal handling of cases in which
3043 * additional workers may be, but usually are not, needed to
3044 * ensure sufficient parallelism. Toward this end,
3045 * implementations of method {@code isReleasable} must be amenable
3046 * to repeated invocation.
3047 *
3048 * <p>For example, here is a ManagedBlocker based on a
3049 * ReentrantLock:
3050 * <pre> {@code
3051 * class ManagedLocker implements ManagedBlocker {
3052 * final ReentrantLock lock;
3053 * boolean hasLock = false;
3054 * ManagedLocker(ReentrantLock lock) { this.lock = lock; }
3055 * public boolean block() {
3056 * if (!hasLock)
3057 * lock.lock();
3058 * return true;
3059 * }
3060 * public boolean isReleasable() {
3061 * return hasLock || (hasLock = lock.tryLock());
3062 * }
3063 * }}</pre>
3064 *
3065 * <p>Here is a class that possibly blocks waiting for an
3066 * item on a given queue:
3067 * <pre> {@code
3068 * class QueueTaker<E> implements ManagedBlocker {
3069 * final BlockingQueue<E> queue;
3070 * volatile E item = null;
3071 * QueueTaker(BlockingQueue<E> q) { this.queue = q; }
3072 * public boolean block() throws InterruptedException {
3073 * if (item == null)
3074 * item = queue.take();
3075 * return true;
3076 * }
3077 * public boolean isReleasable() {
3078 * return item != null || (item = queue.poll()) != null;
3079 * }
3080 * public E getItem() { // call after pool.managedBlock completes
3081 * return item;
3082 * }
3083 * }}</pre>
3084 */
3085 public static interface ManagedBlocker {
3086 /**
3087 * Possibly blocks the current thread, for example waiting for
3088 * a lock or condition.
3089 *
3090 * @return {@code true} if no additional blocking is necessary
3091 * (i.e., if isReleasable would return true)
3092 * @throws InterruptedException if interrupted while waiting
3093 * (the method is not required to do so, but is allowed to)
3094 */
3095 boolean block() throws InterruptedException;
3096
3097 /**
3098 * Returns {@code true} if blocking is unnecessary.
3099 */
3100 boolean isReleasable();
3101 }
3102
3103 /**
3104 * Blocks in accord with the given blocker. If the current thread
3105 * is a {@link ForkJoinWorkerThread}, this method possibly
3106 * arranges for a spare thread to be activated if necessary to
3107 * ensure sufficient parallelism while the current thread is blocked.
3108 *
3109 * <p>If the caller is not a {@link ForkJoinTask}, this method is
3110 * behaviorally equivalent to
3111 * <pre> {@code
3112 * while (!blocker.isReleasable())
3113 * if (blocker.block())
3114 * return;
3115 * }</pre>
3116 *
3117 * If the caller is a {@code ForkJoinTask}, then the pool may
3118 * first be expanded to ensure parallelism, and later adjusted.
3119 *
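* <p>As a purely illustrative usage sketch, assuming a previously
* constructed {@code BlockingQueue<String> queue}, the {@code QueueTaker}
* example from {@link ManagedBlocker} can be used as:
* <pre> {@code
* QueueTaker<String> taker = new QueueTaker<String>(queue);
* ForkJoinPool.managedBlock(taker); // may throw InterruptedException
* String item = taker.getItem();
* }</pre>
*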
3120 * @param blocker the blocker
3121 * @throws InterruptedException if blocker.block did so
3122 */
3123 public static void managedBlock(ManagedBlocker blocker)
3124 throws InterruptedException {
3125 Thread t = Thread.currentThread();
3126 if (t instanceof ForkJoinWorkerThread) {
3127 ForkJoinPool p = ((ForkJoinWorkerThread)t).pool;
3128 while (!blocker.isReleasable()) { // variant of helpSignal
3129 WorkQueue[] ws; WorkQueue q; int m, n;
3130 if ((ws = p.workQueues) != null && (m = ws.length - 1) >= 0) {
3131 for (int i = 0; i <= m; ++i) {
3132 if (blocker.isReleasable())
3133 return;
3134 if ((q = ws[i]) != null && (n = q.queueSize()) > 0) {
3135 p.signalWork(q, n);
3136 if ((int)(p.ctl >> AC_SHIFT) >= 0)
3137 break;
3138 }
3139 }
3140 }
3141 if (p.tryCompensate()) {
3142 try {
3143 do {} while (!blocker.isReleasable() &&
3144 !blocker.block());
3145 } finally {
3146 p.incrementActiveCount();
3147 }
3148 break;
3149 }
3150 }
3151 }
3152 else {
3153 do {} while (!blocker.isReleasable() &&
3154 !blocker.block());
3155 }
3156 }
3157
3158 // AbstractExecutorService overrides. These rely on the undocumented
3159 // fact that ForkJoinTask.adapt returns ForkJoinTasks that also
3160 // implement RunnableFuture.
3161
3162 protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
3163 return new ForkJoinTask.AdaptedRunnable<T>(runnable, value);
3164 }
3165
3166 protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) {
3167 return new ForkJoinTask.AdaptedCallable<T>(callable);
3168 }
3169
3170 // Unsafe mechanics
3171 private static final sun.misc.Unsafe U;
3172 private static final long CTL;
3173 private static final long PARKBLOCKER;
3174 private static final int ABASE;
3175 private static final int ASHIFT;
3176 private static final long STEALCOUNT;
3177 private static final long PLOCK;
3178 private static final long INDEXSEED;
3179 private static final long QLOCK;
3180
3181 static {
3182 // Establish common pool parameters
3183 // TBD: limit or report ignored exceptions?
3184
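// The three lookups below read optional system properties whose names
// are formed from propPrefix (defined elsewhere in this file):
// "...parallelism" supplies an int target parallelism,
// "...threadFactory" names a ForkJoinWorkerThreadFactory class, and
// "...exceptionHandler" names a Thread.UncaughtExceptionHandler class.
// Malformed or inaccessible values are silently ignored.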
3185 int par = 0;
3186 ForkJoinWorkerThreadFactory fac = null;
3187 Thread.UncaughtExceptionHandler handler = null;
3188 try {
3189 String pp = System.getProperty(propPrefix + "parallelism");
3190 String hp = System.getProperty(propPrefix + "exceptionHandler");
3191 String fp = System.getProperty(propPrefix + "threadFactory");
3192 if (fp != null)
3193 fac = ((ForkJoinWorkerThreadFactory)ClassLoader.
3194 getSystemClassLoader().loadClass(fp).newInstance());
3195 if (hp != null)
3196 handler = ((Thread.UncaughtExceptionHandler)ClassLoader.
3197 getSystemClassLoader().loadClass(hp).newInstance());
3198 if (pp != null)
3199 par = Integer.parseInt(pp);
3200 } catch (Exception ignore) {
3201 }
3202
3203 int s; // initialize field offsets for CAS etc
3204 try {
3205 U = getUnsafe();
3206 Class<?> k = ForkJoinPool.class;
3207 CTL = U.objectFieldOffset
3208 (k.getDeclaredField("ctl"));
3209 STEALCOUNT = U.objectFieldOffset
3210 (k.getDeclaredField("stealCount"));
3211 PLOCK = U.objectFieldOffset
3212 (k.getDeclaredField("plock"));
3213 INDEXSEED = U.objectFieldOffset
3214 (k.getDeclaredField("indexSeed"));
3215 Class<?> tk = Thread.class;
3216 PARKBLOCKER = U.objectFieldOffset
3217 (tk.getDeclaredField("parkBlocker"));
3218 Class<?> wk = WorkQueue.class;
3219 QLOCK = U.objectFieldOffset
3220 (wk.getDeclaredField("qlock"));
3221 Class<?> ak = ForkJoinTask[].class;
3222 ABASE = U.arrayBaseOffset(ak);
3223 s = U.arrayIndexScale(ak);
3224 ASHIFT = 31 - Integer.numberOfLeadingZeros(s);
3225 } catch (Exception e) {
3226 throw new Error(e);
3227 }
3228 if ((s & (s-1)) != 0)
3229 throw new Error("data type scale not a power of two");
3230
3231 /*
3232 * For extra caution, computations to set up pool state are
3233 * here; the constructor just assigns these values to fields.
3234 */
3235 ForkJoinWorkerThreadFactory defaultFac =
3236 defaultForkJoinWorkerThreadFactory =
3237 new DefaultForkJoinWorkerThreadFactory();
3238 if (fac == null)
3239 fac = defaultFac;
3240 if (par <= 0)
3241 par = Runtime.getRuntime().availableProcessors();
3242 if (par > MAX_CAP)
3243 par = MAX_CAP;
3244 long np = (long)(-par); // precompute initial ctl value
3245 long ct = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
3246
3247 commonPoolParallelism = par;
3248 commonPool = new ForkJoinPool(par, ct, fac, handler);
3249 modifyThreadPermission = new RuntimePermission("modifyThread");
3250 submitters = new ThreadLocal<Submitter>();
3251 }
3252
3253 /**
3254 * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
3255 * Replace with a simple call to Unsafe.getUnsafe when integrating
3256 * into a jdk.
3257 *
3258 * @return a sun.misc.Unsafe
3259 */
3260 private static sun.misc.Unsafe getUnsafe() {
3261 try {
3262 return sun.misc.Unsafe.getUnsafe();
3263 } catch (SecurityException se) {
3264 try {
3265 return java.security.AccessController.doPrivileged
3266 (new java.security
3267 .PrivilegedExceptionAction<sun.misc.Unsafe>() {
3268 public sun.misc.Unsafe run() throws Exception {
3269 java.lang.reflect.Field f = sun.misc
3270 .Unsafe.class.getDeclaredField("theUnsafe");
3271 f.setAccessible(true);
3272 return (sun.misc.Unsafe) f.get(null);
3273 }});
3274 } catch (java.security.PrivilegedActionException e) {
3275 throw new RuntimeException("Could not initialize intrinsics",
3276 e.getCause());
3277 }
3278 }
3279 }
3280
3281 }