5 |
|
*/ |
6 |
|
|
7 |
|
package jsr166y; |
8 |
– |
|
8 |
|
import java.util.ArrayList; |
9 |
|
import java.util.Arrays; |
10 |
|
import java.util.Collection; |
20 |
|
import java.util.concurrent.TimeUnit; |
21 |
|
import java.util.concurrent.atomic.AtomicInteger; |
22 |
|
import java.util.concurrent.atomic.AtomicLong; |
23 |
< |
import java.util.concurrent.locks.ReentrantLock; |
23 |
> |
import java.util.concurrent.locks.AbstractQueuedSynchronizer; |
24 |
|
import java.util.concurrent.locks.Condition; |
25 |
|
|
26 |
|
/** |
59 |
|
* convenient form for informal monitoring. |
60 |
|
* |
61 |
|
* <p> As is the case with other ExecutorServices, there are three |
62 |
< |
* main task execution methods summarized in the following |
63 |
< |
* table. These are designed to be used primarily by clients not |
64 |
< |
* already engaged in fork/join computations in the current pool. The |
65 |
< |
* main forms of these methods accept instances of {@code |
66 |
< |
* ForkJoinTask}, but overloaded forms also allow mixed execution of |
67 |
< |
* plain {@code Runnable}- or {@code Callable}- based activities as |
68 |
< |
* well. However, tasks that are already executing in a pool should |
69 |
< |
* normally instead use the within-computation forms listed in the |
70 |
< |
* table unless using async event-style tasks that are not usually |
71 |
< |
* joined, in which case there is little difference among choice of |
73 |
< |
* methods. |
62 |
> |
* main task execution methods summarized in the following table. |
63 |
> |
* These are designed to be used primarily by clients not already |
64 |
> |
* engaged in fork/join computations in the current pool. The main |
65 |
> |
* forms of these methods accept instances of {@code ForkJoinTask}, |
66 |
> |
* but overloaded forms also allow mixed execution of plain {@code |
67 |
> |
* Runnable}- or {@code Callable}- based activities as well. However, |
68 |
> |
* tasks that are already executing in a pool should normally instead |
69 |
> |
* use the within-computation forms listed in the table unless using |
70 |
> |
* async event-style tasks that are not usually joined, in which case |
71 |
> |
* there is little difference among choice of methods. |
72 |
|
* |
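* <p>For example, ahead of the summary table below (an illustrative
* sketch only; {@code SumTask} stands for some user-defined {@code
* RecursiveTask<Long>} over a hypothetical {@code data} array):
*
* <pre> {@code
* ForkJoinPool pool = new ForkJoinPool();
* pool.execute(new SumTask(data, 0, data.length));          // arrange async execution
* Long sum = pool.invoke(new SumTask(data, 0, data.length)); // execute and await result
* ForkJoinTask<Long> job = pool.submit(new SumTask(data, 0, data.length));
* Long sum2 = job.join();                                    // async execution plus future}</pre>
*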
73 |
|
* <table BORDER CELLPADDING=3 CELLSPACING=1> |
74 |
|
* <tr> |
129 |
|
* |
130 |
|
* This class and its nested classes provide the main |
131 |
|
* functionality and control for a set of worker threads: |
132 |
< |
* Submissions from non-FJ threads enter into submission |
133 |
< |
* queues. Workers take these tasks and typically split them into |
134 |
< |
* subtasks that may be stolen by other workers. Preference rules |
135 |
< |
* give first priority to processing tasks from their own queues |
136 |
< |
* (LIFO or FIFO, depending on mode), then to randomized FIFO |
137 |
< |
* steals of tasks in other queues. |
132 |
> |
* Submissions from non-FJ threads enter into submission queues. |
133 |
> |
* Workers take these tasks and typically split them into subtasks |
134 |
> |
* that may be stolen by other workers. Preference rules give |
135 |
> |
* first priority to processing tasks from their own queues (LIFO |
136 |
> |
* or FIFO, depending on mode), then to randomized FIFO steals of |
137 |
> |
* tasks in other queues. |
138 |
|
* |
139 |
< |
* WorkQueues. |
139 |
> |
* WorkQueues |
140 |
|
* ========== |
141 |
|
* |
142 |
|
* Most operations occur within work-stealing queues (in nested |
154 |
|
* (http://research.sun.com/scalable/pubs/index.html) and |
155 |
|
* "Idempotent work stealing" by Michael, Saraswat, and Vechev, |
156 |
|
* PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186). |
157 |
< |
* The main differences ultimately stem from gc requirements that |
157 |
> |
* The main differences ultimately stem from GC requirements that |
158 |
|
* we null out taken slots as soon as we can, to maintain as small |
159 |
|
* a footprint as possible even in programs generating huge |
160 |
|
* numbers of tasks. To accomplish this, we shift the CAS |
176 |
|
* If an attempted steal fails, a thief always chooses a different |
177 |
|
* random victim target to try next. So, in order for one thief to |
178 |
|
* progress, it suffices for any in-progress poll or new push on |
179 |
< |
* any empty queue to complete. |
179 |
> |
* any empty queue to complete. (This is why we normally use |
180 |
> |
* method pollAt and its variants that try once at the apparent |
181 |
> |
* base index, else consider alternative actions, rather than |
182 |
> |
* method poll.) |
183 |
|
* |
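* A rough, self-contained sketch of this try-once-at-base
* discipline, using java.util.concurrent.atomic.AtomicReferenceArray
* in place of the Unsafe operations the real WorkQueue relies on
* (all names here are illustrative only):
*
*   final class MiniDeque<T> {
*       final AtomicReferenceArray<T> array = new AtomicReferenceArray<T>(64);
*       volatile int base;  // next slot to steal from; advanced by thieves
*       int top;            // next slot to push; owner-only
*
*       // One attempt to claim the task at apparent base index b. A null
*       // return means the slot was empty or the CAS lost a race, so a
*       // thief moves on to another victim instead of retrying here.
*       T pollAt(int b) {
*           int j = (array.length() - 1) & b;  // length is a power of two
*           T t = array.get(j);
*           if (t != null && base == b && array.compareAndSet(j, t, null)) {
*               base = b + 1;
*               return t;
*           }
*           return null;
*       }
*   }
*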
184 |
|
* This approach also enables support of a user mode in which local |
185 |
|
* task processing is in FIFO, not LIFO order, simply by using |
189 |
|
* rarely provide the best possible performance on a given |
190 |
|
* machine, but portably provide good throughput by averaging over |
191 |
|
* these factors. (Further, even if we did try to use such |
192 |
< |
* information, we do not usually have a basis for exploiting |
193 |
< |
* it. For example, some sets of tasks profit from cache |
194 |
< |
* affinities, but others are harmed by cache pollution effects.) |
192 |
> |
* information, we do not usually have a basis for exploiting it. |
193 |
> |
* For example, some sets of tasks profit from cache affinities, |
194 |
> |
* but others are harmed by cache pollution effects.) |
195 |
|
* |
196 |
|
* WorkQueues are also used in a similar way for tasks submitted |
197 |
|
* to the pool. We cannot mix these tasks in the same queues used |
198 |
|
* for work-stealing (this would contaminate lifo/fifo |
199 |
< |
* processing). Instead, we loosely associate (via hashing) |
200 |
< |
* submission queues with submitting threads, and randomly scan |
201 |
< |
* these queues as well when looking for work. In essence, |
202 |
< |
* submitters act like workers except that they never take tasks, |
203 |
< |
* and they are multiplexed on to a finite number of shared work |
204 |
< |
* queues. However, classes are set up so that future extensions |
205 |
< |
* could allow submitters to optionally help perform tasks as |
206 |
< |
* well. Pool submissions from internal workers are also allowed, |
207 |
< |
* but use randomized rather than thread-hashed queue indices to |
208 |
< |
* avoid imbalance. Insertion of tasks in shared mode requires a |
199 |
> |
* processing). Instead, we loosely associate submission queues |
200 |
> |
* with submitting threads, using a form of hashing. The |
201 |
> |
* ThreadLocal Submitter class contains a value initially used as |
202 |
> |
* a hash code for choosing existing queues, but may be randomly |
203 |
> |
* repositioned upon contention with other submitters. In |
204 |
> |
* essence, submitters act like workers except that they never |
205 |
> |
* take tasks, and they are multiplexed on to a finite number of |
206 |
> |
* shared work queues. However, classes are set up so that future |
207 |
> |
* extensions could allow submitters to optionally help perform |
208 |
> |
* tasks as well. Insertion of tasks in shared mode requires a |
209 |
|
* lock (mainly to protect in the case of resizing) but we use |
210 |
|
* only a simple spinlock (using bits in field runState), because |
211 |
< |
* submitters encountering a busy queue try or create others so |
212 |
< |
* never block. |
211 |
> |
* submitters encountering a busy queue move on to try or create |
212 |
> |
* other queues -- they block only when creating and registering |
213 |
> |
* new queues. |
214 |
|
* |
215 |
< |
* Management. |
215 |
> |
* Management |
216 |
|
* ========== |
217 |
|
* |
218 |
|
* The main throughput advantages of work-stealing stem from |
222 |
|
* tactic for avoiding bottlenecks is packing nearly all |
223 |
|
* essentially atomic control state into two volatile variables |
224 |
|
* that are by far most often read (not written) as status and |
225 |
< |
* consistency checks |
225 |
> |
* consistency checks. |
226 |
|
* |
227 |
|
* Field "ctl" contains 64 bits holding all the information needed |
228 |
|
* to atomically decide to add, inactivate, enqueue (on an event |
248 |
|
* readers must tolerate null slots. Shared (submission) queues |
249 |
|
* are at even indices, worker queues at odd indices. Grouping |
250 |
|
* them together in this way simplifies and speeds up task |
251 |
< |
* scanning. To avoid flailing during start-up, the array is |
252 |
< |
* presized to hold twice #parallelism workers (which is unlikely |
253 |
< |
* to need further resizing during execution). But to avoid |
254 |
< |
* dealing with so many null slots, variable runState includes a |
253 |
< |
* mask for the nearest power of two that contains all current |
254 |
< |
* workers. All worker thread creation is on-demand, triggered by |
255 |
< |
* task submissions, replacement of terminated workers, and/or |
251 |
> |
* scanning. |
252 |
> |
* |
253 |
> |
* All worker thread creation is on-demand, triggered by task |
254 |
> |
* submissions, replacement of terminated workers, and/or |
255 |
|
* compensation for blocked workers. However, all other support |
256 |
|
* code is set up to work with other policies. To ensure that we |
257 |
|
* do not hold on to worker references that would prevent GC, ALL |
264 |
|
* both index-check and null-check the IDs. All such accesses |
265 |
|
* ignore bad IDs by returning out early from what they are doing, |
266 |
|
* since this can only be associated with termination, in which |
267 |
< |
* case it is OK to give up. |
268 |
< |
* |
269 |
< |
* All uses of the workQueues array check that it is non-null |
270 |
< |
* (even if previously non-null). This allows nulling during |
271 |
< |
* termination, which is currently not necessary, but remains an |
272 |
< |
* option for resource-revocation-based shutdown schemes. It also |
274 |
< |
* helps reduce JIT issuance of uncommon-trap code, which tends to |
267 |
> |
* case it is OK to give up. All uses of the workQueues array |
268 |
> |
* also check that it is non-null (even if previously |
269 |
> |
* non-null). This allows nulling during termination, which is |
270 |
> |
* currently not necessary, but remains an option for |
271 |
> |
* resource-revocation-based shutdown schemes. It also helps |
272 |
> |
* reduce JIT issuance of uncommon-trap code, which tends to |
273 |
|
* unnecessarily complicate control flow in some methods. |
274 |
|
* |
275 |
|
* Event Queuing. Unlike HPC work-stealing frameworks, we cannot |
297 |
|
* some other queued worker rather than itself, which has the same |
298 |
|
* net effect. Because enqueued workers may actually be rescanning |
299 |
|
* rather than waiting, we set and clear the "parker" field of |
300 |
< |
* Workqueues to reduce unnecessary calls to unpark. (This |
300 |
> |
* WorkQueues to reduce unnecessary calls to unpark. (This |
301 |
|
* requires a secondary recheck to avoid missed signals.) Note |
302 |
|
* the unusual conventions about Thread.interrupts surrounding |
303 |
|
* parking and other blocking: Because interrupts are used solely |
325 |
|
* terminating all workers after long periods of non-use. |
326 |
|
* |
327 |
|
* Shutdown and Termination. A call to shutdownNow atomically sets |
328 |
< |
* a runState bit and then (non-atomically) sets each workers |
328 |
> |
* a runState bit and then (non-atomically) sets each worker's |
329 |
|
* runState status, cancels all unprocessed tasks, and wakes up |
330 |
|
* all waiting workers. Detecting whether termination should |
331 |
|
* commence after a non-abrupt shutdown() call requires more work |
334 |
|
* indication but non-abrupt shutdown still requires a rechecking |
335 |
|
* scan for any workers that are inactive but not queued. |
336 |
|
* |
337 |
< |
* Joining Tasks. |
338 |
< |
* ============== |
337 |
> |
* Joining Tasks |
338 |
> |
* ============= |
339 |
|
* |
340 |
|
* Any of several actions may be taken when one worker is waiting |
341 |
< |
* to join a task stolen (or always held by) another. Because we |
341 |
> |
* to join a task stolen (or always held) by another. Because we |
342 |
|
* are multiplexing many tasks on to a pool of workers, we can't |
343 |
|
* just let them block (as in Thread.join). We also cannot just |
344 |
|
* reassign the joiner's run-time stack with another and replace |
345 |
|
* it later, which would be a form of "continuation", that even if |
346 |
|
* possible is not necessarily a good idea since we sometimes need |
347 |
< |
* both an unblocked task and its continuation to |
348 |
< |
* progress. Instead we combine two tactics: |
347 |
> |
* both an unblocked task and its continuation to progress. |
348 |
> |
* Instead we combine two tactics: |
349 |
|
* |
350 |
|
* Helping: Arranging for the joiner to execute some task that it |
351 |
|
* would be running if the steal had not occurred. |
380 |
|
* (http://portal.acm.org/citation.cfm?id=155354). It differs in |
381 |
|
* that: (1) We only maintain dependency links across workers upon |
382 |
|
* steals, rather than use per-task bookkeeping. This sometimes |
383 |
< |
* requires a linear scan of workers array to locate stealers, but |
384 |
< |
* often doesn't because stealers leave hints (that may become |
383 |
> |
* requires a linear scan of workQueues array to locate stealers, |
384 |
> |
* but often doesn't because stealers leave hints (that may become |
385 |
|
* stale/wrong) of where to locate them. A stealHint is only a |
386 |
|
* hint because a worker might have had multiple steals and the |
387 |
|
* hint records only one of them (usually the most current). |
392 |
|
* which means that we miss links in the chain during long-lived |
393 |
|
* tasks, GC stalls, etc. (which is OK since blocking in such cases |
394 |
|
* is usually a good idea). (4) We bound the number of attempts |
395 |
< |
* to find work (see MAX_HELP_DEPTH) and fall back to suspending |
396 |
< |
* the worker and if necessary replacing it with another. |
395 |
> |
* to find work (see MAX_HELP) and fall back to suspending the |
396 |
> |
* worker and if necessary replacing it with another. |
397 |
|
* |
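* Conceptually, the helping loop walks the joiner->stealer chain in
* roughly the following shape (a sketch, not the actual
* tryHelpStealer; findStealer stands in for the stealHint-then-scan
* lookup, and joiner is the waiting worker's WorkQueue):
*
*   ForkJoinTask<?> subtask = joiner.currentJoin;
*   for (int d = 0; subtask != null && subtask.status >= 0 && d < MAX_HELP; ++d) {
*       WorkQueue v = findStealer(subtask);   // hint first, else scan workQueues
*       if (v == null)
*           break;                            // link missing: consider blocking
*       ForkJoinTask<?> t = v.pollAt(v.base); // run a task on the stealer's behalf
*       if (t != null)
*           t.doExec();
*       subtask = v.currentJoin;              // descend one level in the chain
*   }
*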
398 |
|
* It is impossible to keep exactly the target parallelism number |
399 |
|
* of threads running at any given time. Determining the |
400 |
|
* existence of conservatively safe helping targets, the |
401 |
|
* availability of already-created spares, and the apparent need |
402 |
|
* to create new spares are all racy, so we rely on multiple |
403 |
< |
* retries of each. Currently, in keeping with on-demand |
404 |
< |
* signalling policy, we compensate only if blocking would leave |
405 |
< |
* less than one active (non-waiting, non-blocked) worker. |
406 |
< |
* Additionally, to avoid some false alarms due to GC, lagging |
407 |
< |
* counters, system activity, etc, compensated blocking for joins |
408 |
< |
* is only attempted after rechecks stabilize in |
409 |
< |
* ForkJoinTask.awaitJoin. (Retries are interspersed with |
410 |
< |
* Thread.yield, for good citizenship.) |
403 |
> |
* retries of each. Compensation in the apparent absence of |
404 |
> |
* helping opportunities is challenging to control on JVMs, where |
405 |
> |
* GC and other activities can stall progress of tasks that in |
406 |
> |
* turn stall out many other dependent tasks, without us being |
407 |
> |
* able to determine whether they will ever require compensation. |
408 |
> |
* Even though work-stealing otherwise encounters little |
409 |
> |
* degradation in the presence of more threads than cores, |
410 |
> |
* aggressively adding new threads in such cases entails risk of |
411 |
> |
* unwanted positive feedback control loops in which more threads |
412 |
> |
* cause more dependent stalls (as well as delayed progress of |
413 |
> |
* unblocked threads to the point that we know they are available) |
414 |
> |
* leading to more situations requiring more threads, and so |
415 |
> |
* on. This aspect of control can be seen as an (analytically |
416 |
> |
* intractable) game with an opponent that may choose the worst |
417 |
> |
* (for us) active thread to stall at any time. We take several |
418 |
> |
* precautions to bound losses (and thus bound gains), mainly in |
419 |
> |
* methods tryCompensate and awaitJoin: (1) We only try |
420 |
> |
* compensation after attempting enough helping steps (measured |
421 |
> |
* via counting and timing) that we have already consumed the |
422 |
> |
* estimated cost of creating and activating a new thread. (2) We |
423 |
> |
* allow up to 50% of threads to be blocked before initially |
424 |
> |
* adding any others, and unless completely saturated, check that |
425 |
> |
* some work is available for a new worker before adding. Also, we |
426 |
> |
* create up to only 50% more threads until entering a mode that |
427 |
> |
* only adds a thread if all others are possibly blocked. All |
428 |
> |
* together, this means that we might be half as fast to react, |
429 |
> |
* and create half as many threads as possible in the ideal case, |
430 |
> |
* but present vastly fewer anomalies in all other cases compared |
431 |
> |
* to both more aggressive and more conservative alternatives. |
432 |
|
* |
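* (Reading these bounds literally: with target parallelism 8, up to
* 4 workers may block on joins before any compensating thread is
* added, and roughly 12 threads may exist before entering the mode
* that adds a thread only when all others are possibly blocked.)
*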
433 |
|
* Style notes: There is a lot of representation-level coupling |
434 |
|
* among classes ForkJoinPool, ForkJoinWorkerThread, and |
436 |
|
* managed by ForkJoinPool, so are directly accessed. There is |
437 |
|
* little point trying to reduce this, since any associated future |
438 |
|
* changes in representations will need to be accompanied by |
439 |
< |
* algorithmic changes anyway. All together, these low-level |
440 |
< |
* implementation choices produce as much as a factor of 4 |
441 |
< |
* performance improvement compared to naive implementations, and |
442 |
< |
* enable the processing of billions of tasks per second, at the |
443 |
< |
* expense of some ugliness. |
444 |
< |
* |
445 |
< |
* Methods signalWork() and scan() are the main bottlenecks so are |
446 |
< |
* especially heavily micro-optimized/mangled. There are lots of |
447 |
< |
* inline assignments (of form "while ((local = field) != 0)") |
448 |
< |
* which are usually the simplest way to ensure the required read |
449 |
< |
* orderings (which are sometimes critical). This leads to a |
450 |
< |
* "C"-like style of listing declarations of these locals at the |
451 |
< |
* heads of methods or blocks. There are several occurrences of |
452 |
< |
* the unusual "do {} while (!cas...)" which is the simplest way |
453 |
< |
* to force an update of a CAS'ed variable. There are also other |
454 |
< |
* coding oddities that help some methods perform reasonably even |
455 |
< |
* when interpreted (not compiled). |
456 |
< |
* |
457 |
< |
* The order of declarations in this file is: (1) declarations of |
458 |
< |
* statics (2) fields (along with constants used when unpacking |
459 |
< |
* some of them), listed in an order that tends to reduce |
460 |
< |
* contention among them a bit under most JVMs; (3) nested |
461 |
< |
* classes; (4) internal control methods; (5) callbacks and other |
462 |
< |
* support for ForkJoinTask methods; (6) exported methods (plus a |
444 |
< |
* few little helpers); (7) static block initializing all statics |
445 |
< |
* in a minimally dependent order. |
439 |
> |
* algorithmic changes anyway. Several methods intrinsically |
440 |
> |
* sprawl because they must accumulate sets of consistent reads of |
441 |
> |
* volatiles held in local variables. Methods signalWork() and |
442 |
> |
* scan() are the main bottlenecks, so are especially heavily |
443 |
> |
* micro-optimized/mangled. There are lots of inline assignments |
444 |
> |
* (of form "while ((local = field) != 0)") which are usually the |
445 |
> |
* simplest way to ensure the required read orderings (which are |
446 |
> |
* sometimes critical). This leads to a "C"-like style of listing |
447 |
> |
* declarations of these locals at the heads of methods or blocks. |
448 |
> |
* There are several occurrences of the unusual "do {} while |
449 |
> |
* (!cas...)" which is the simplest way to force an update of a |
450 |
> |
* CAS'ed variable. There are also other coding oddities that help |
451 |
> |
* some methods perform reasonably even when interpreted (not |
452 |
> |
* compiled). |
453 |
> |
* |
454 |
> |
* The order of declarations in this file is: |
455 |
> |
* (1) Static utility functions |
456 |
> |
* (2) Nested (static) classes |
457 |
> |
* (3) Static fields |
458 |
> |
* (4) Fields, along with constants used when unpacking some of them |
459 |
> |
* (5) Internal control methods |
460 |
> |
* (6) Callbacks and other support for ForkJoinTask methods |
461 |
> |
* (7) Exported methods |
462 |
> |
* (8) Static block initializing statics in minimally dependent order |
463 |
|
*/ |
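
// A sketch of the "do {} while (!cas...)" idiom named in the style
// notes above; CTL is the Unsafe field offset for ctl, and AC_UNIT
// is among the constants declared later. Illustrative only.
private void incrementActiveCountSketch() {
    long c;  // the empty-bodied loop simply retries the CAS until it succeeds
    do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT));
}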
464 |
|
|
465 |
+ |
// Static utilities |
466 |
+ |
|
467 |
+ |
/** |
468 |
+ |
* If there is a security manager, makes sure caller has |
469 |
+ |
* permission to modify threads. |
470 |
+ |
*/ |
471 |
+ |
private static void checkPermission() { |
472 |
+ |
SecurityManager security = System.getSecurityManager(); |
473 |
+ |
if (security != null) |
474 |
+ |
security.checkPermission(modifyThreadPermission); |
475 |
+ |
} |
476 |
+ |
|
477 |
+ |
// Nested classes |
478 |
+ |
|
479 |
|
/** |
480 |
|
* Factory for creating new {@link ForkJoinWorkerThread}s. |
481 |
|
* A {@code ForkJoinWorkerThreadFactory} must be defined and used |
504 |
|
} |
505 |
|
|
506 |
|
/** |
507 |
< |
* Creates a new ForkJoinWorkerThread. This factory is used unless |
508 |
< |
* overridden in ForkJoinPool constructors. |
509 |
< |
*/ |
510 |
< |
public static final ForkJoinWorkerThreadFactory |
511 |
< |
defaultForkJoinWorkerThreadFactory; |
512 |
< |
|
513 |
< |
/** |
514 |
< |
* Permission required for callers of methods that may start or |
515 |
< |
* kill threads. |
516 |
< |
*/ |
517 |
< |
private static final RuntimePermission modifyThreadPermission; |
518 |
< |
|
519 |
< |
/** |
520 |
< |
* If there is a security manager, makes sure caller has |
521 |
< |
* permission to modify threads. |
522 |
< |
*/ |
523 |
< |
private static void checkPermission() { |
524 |
< |
SecurityManager security = System.getSecurityManager(); |
525 |
< |
if (security != null) |
526 |
< |
security.checkPermission(modifyThreadPermission); |
507 |
> |
* A simple non-reentrant lock used for exclusion when managing |
508 |
> |
* queues and workers. We use a custom lock so that we can readily |
509 |
> |
* probe lock state in constructions that check among alternative |
510 |
> |
* actions. The lock is normally only very briefly held, and |
511 |
> |
* sometimes treated as a spinlock, but other usages block to |
512 |
> |
* reduce overall contention in those cases where locked code |
513 |
> |
* bodies perform allocation/resizing. |
514 |
> |
*/ |
515 |
> |
static final class Mutex extends AbstractQueuedSynchronizer { |
516 |
> |
public final boolean tryAcquire(int ignore) { |
517 |
> |
return compareAndSetState(0, 1); |
518 |
> |
} |
519 |
> |
public final boolean tryRelease(int ignore) { |
520 |
> |
setState(0); |
521 |
> |
return true; |
522 |
> |
} |
523 |
> |
public final void lock() { acquire(0); } |
524 |
> |
public final void unlock() { release(0); } |
525 |
> |
public final boolean isHeldExclusively() { return getState() == 1; } |
526 |
> |
public final Condition newCondition() { return new ConditionObject(); } |
527 |
|
} |
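
// Typical use of Mutex, sketched (the guarded bodies are
// illustrative): block around allocation/resizing, or use the
// visible tryAcquire as a non-blocking spinlock-style probe.
private void mutexUsageSketch(Mutex lock) {
    lock.lock();                     // block; fits allocation/resizing
    try {
        // ... register or resize queues ...
    } finally {
        lock.unlock();
    }
    if (lock.tryAcquire(0)) {        // spinlock-style probe; never blocks
        try {
            // ... brief critical section ...
        } finally {
            lock.unlock();
        }
    }
    // else: choose an alternative action instead of waiting
}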
528 |
|
|
529 |
|
/** |
530 |
< |
* Generator for assigning sequence numbers as pool names. |
531 |
< |
*/ |
532 |
< |
private static final AtomicInteger poolNumberGenerator; |
533 |
< |
|
503 |
< |
/** |
504 |
< |
* Bits and masks for control variables |
505 |
< |
* |
506 |
< |
* Field ctl is a long packed with: |
507 |
< |
* AC: Number of active running workers minus target parallelism (16 bits) |
508 |
< |
* TC: Number of total workers minus target parallelism (16 bits) |
509 |
< |
* ST: true if pool is terminating (1 bit) |
510 |
< |
* EC: the wait count of top waiting thread (15 bits) |
511 |
< |
* ID: ~(poolIndex >>> 1) of top of Treiber stack of waiters (16 bits) |
512 |
< |
* |
513 |
< |
* When convenient, we can extract the upper 32 bits of counts and |
514 |
< |
* the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e = |
515 |
< |
* (int)ctl. The ec field is never accessed alone, but always |
516 |
< |
* together with id and st. The offsets of counts by the target |
517 |
< |
* parallelism and the positionings of fields makes it possible to |
518 |
< |
* perform the most common checks via sign tests of fields: When |
519 |
< |
* ac is negative, there are not enough active workers, when tc is |
520 |
< |
* negative, there are not enough total workers, when id is |
521 |
< |
* negative, there is at least one waiting worker, and when e is |
522 |
< |
* negative, the pool is terminating. To deal with these possibly |
523 |
< |
* negative fields, we use casts in and out of "short" and/or |
524 |
< |
* signed shifts to maintain signedness. |
525 |
< |
* |
526 |
< |
* When a thread is queued (inactivated), its eventCount field is |
527 |
< |
* negative, which is the only way to tell if a worker is |
528 |
< |
* prevented from executing tasks, even though it must continue to |
529 |
< |
* scan for them to avoid queuing races. |
530 |
< |
* |
531 |
< |
* Field runState is an int packed with: |
532 |
< |
* SHUTDOWN: true if shutdown is enabled (1 bit) |
533 |
< |
* SEQ: a sequence number updated upon (de)registering workers (15 bits) |
534 |
< |
* MASK: mask (power of 2 - 1) covering all registered poolIndexes (16 bits) |
535 |
< |
* |
536 |
< |
* The combination of mask and sequence number enables simple |
537 |
< |
* consistency checks: Staleness of read-only operations on the |
538 |
< |
* workers and queues arrays can be checked by comparing runState |
539 |
< |
* before vs after the reads. The low 16 bits (i.e, anding with |
540 |
< |
* SMASK) hold (the smallest power of two covering all worker |
541 |
< |
* indices, minus one. The mask for queues (vs workers) is twice |
542 |
< |
* this value plus 1. |
543 |
< |
*/ |
544 |
< |
|
545 |
< |
// bit positions/shifts for fields |
546 |
< |
private static final int AC_SHIFT = 48; |
547 |
< |
private static final int TC_SHIFT = 32; |
548 |
< |
private static final int ST_SHIFT = 31; |
549 |
< |
private static final int EC_SHIFT = 16; |
550 |
< |
|
551 |
< |
// bounds |
552 |
< |
private static final int MAX_ID = 0x7fff; // max poolIndex |
553 |
< |
private static final int SMASK = 0xffff; // mask short bits |
554 |
< |
private static final int SHORT_SIGN = 1 << 15; |
555 |
< |
private static final int INT_SIGN = 1 << 31; |
556 |
< |
|
557 |
< |
// masks |
558 |
< |
private static final long STOP_BIT = 0x0001L << ST_SHIFT; |
559 |
< |
private static final long AC_MASK = ((long)SMASK) << AC_SHIFT; |
560 |
< |
private static final long TC_MASK = ((long)SMASK) << TC_SHIFT; |
561 |
< |
|
562 |
< |
// units for incrementing and decrementing |
563 |
< |
private static final long TC_UNIT = 1L << TC_SHIFT; |
564 |
< |
private static final long AC_UNIT = 1L << AC_SHIFT; |
565 |
< |
|
566 |
< |
// masks and units for dealing with u = (int)(ctl >>> 32) |
567 |
< |
private static final int UAC_SHIFT = AC_SHIFT - 32; |
568 |
< |
private static final int UTC_SHIFT = TC_SHIFT - 32; |
569 |
< |
private static final int UAC_MASK = SMASK << UAC_SHIFT; |
570 |
< |
private static final int UTC_MASK = SMASK << UTC_SHIFT; |
571 |
< |
private static final int UAC_UNIT = 1 << UAC_SHIFT; |
572 |
< |
private static final int UTC_UNIT = 1 << UTC_SHIFT; |
573 |
< |
|
574 |
< |
// masks and units for dealing with e = (int)ctl |
575 |
< |
private static final int E_MASK = 0x7fffffff; // no STOP_BIT |
576 |
< |
private static final int E_SEQ = 1 << EC_SHIFT; |
577 |
< |
|
578 |
< |
// runState bits |
579 |
< |
private static final int SHUTDOWN = 1 << 31; |
580 |
< |
private static final int RS_SEQ = 1 << 16; |
581 |
< |
private static final int RS_SEQ_MASK = 0x7fff0000; |
582 |
< |
|
583 |
< |
// access mode for WorkQueue |
584 |
< |
static final int LIFO_QUEUE = 0; |
585 |
< |
static final int FIFO_QUEUE = 1; |
586 |
< |
static final int SHARED_QUEUE = -1; |
587 |
< |
|
588 |
< |
/** |
589 |
< |
* The wakeup interval (in nanoseconds) for a worker waiting for a |
590 |
< |
* task when the pool is quiescent to instead try to shrink the |
591 |
< |
* number of workers. The exact value does not matter too |
592 |
< |
* much. It must be short enough to release resources during |
593 |
< |
* sustained periods of idleness, but not so short that threads |
594 |
< |
* are continually re-created. |
595 |
< |
*/ |
596 |
< |
private static final long SHRINK_RATE = |
597 |
< |
4L * 1000L * 1000L * 1000L; // 4 seconds |
598 |
< |
|
599 |
< |
/** |
600 |
< |
* The timeout value for attempted shrinkage, includes |
601 |
< |
* some slop to cope with system timer imprecision. |
602 |
< |
*/ |
603 |
< |
private static final long SHRINK_TIMEOUT = SHRINK_RATE - (SHRINK_RATE / 10); |
604 |
< |
|
605 |
< |
/** |
606 |
< |
* The maximum stolen->joining link depth allowed in tryHelpStealer. |
607 |
< |
* Depths for legitimate chains are unbounded, but we use a fixed |
608 |
< |
* constant to avoid (otherwise unchecked) cycles and to bound |
609 |
< |
* staleness of traversal parameters at the expense of sometimes |
610 |
< |
* blocking when we could be helping. |
611 |
< |
*/ |
612 |
< |
private static final int MAX_HELP_DEPTH = 16; |
613 |
< |
|
614 |
< |
/* |
615 |
< |
* Field layout order in this class tends to matter more than one |
616 |
< |
* would like. Runtime layout order is only loosely related to |
617 |
< |
* declaration order and may differ across JVMs, but the following |
618 |
< |
* empirically works OK on current JVMs. |
530 |
> |
* Class for artificial tasks that are used to replace the target |
531 |
> |
* of local joins if they are removed from an interior queue slot |
532 |
> |
* in WorkQueue.tryRemoveAndExec. We don't need the proxy to |
533 |
> |
* actually do anything beyond having a unique identity. |
534 |
|
*/ |
535 |
< |
|
536 |
< |
volatile long ctl; // main pool control |
537 |
< |
final int parallelism; // parallelism level |
538 |
< |
final int localMode; // per-worker scheduling mode |
539 |
< |
int nextPoolIndex; // hint used in registerWorker |
540 |
< |
volatile int runState; // shutdown status, seq, and mask |
626 |
< |
WorkQueue[] workQueues; // main registry |
627 |
< |
final ReentrantLock lock; // for registration |
628 |
< |
final Condition termination; // for awaitTermination |
629 |
< |
final ForkJoinWorkerThreadFactory factory; // factory for new workers |
630 |
< |
final Thread.UncaughtExceptionHandler ueh; // per-worker UEH |
631 |
< |
final AtomicLong stealCount; // collect counts when terminated |
632 |
< |
final AtomicInteger nextWorkerNumber; // to create worker name string |
633 |
< |
final String workerNamePrefix; // Prefix for assigning worker names |
535 |
> |
static final class EmptyTask extends ForkJoinTask<Void> { |
536 |
> |
EmptyTask() { status = ForkJoinTask.NORMAL; } // force done |
537 |
> |
public final Void getRawResult() { return null; } |
538 |
> |
public final void setRawResult(Void x) {} |
539 |
> |
public final boolean exec() { return true; } |
540 |
> |
} |
541 |
|
|
542 |
|
/** |
543 |
|
* Queues supporting work-stealing as well as external task |
588 |
|
* avoiding really bad worst-case access. (Until better JVM |
589 |
|
* support is in place, this padding is dependent on transient |
590 |
|
* properties of JVM field layout rules.) We also take care in |
591 |
< |
* allocating and sizing and resizing the array. Non-shared queue |
591 |
> |
* allocating, sizing and resizing the array. Non-shared queue |
592 |
|
* arrays are initialized (via method growArray) by workers before |
593 |
|
* use. Others are allocated on first use. |
594 |
|
*/ |
595 |
|
static final class WorkQueue { |
596 |
|
/** |
597 |
|
* Capacity of work-stealing queue array upon initialization. |
598 |
< |
* Must be a power of two; at least 4, but set larger to |
599 |
< |
* reduce cacheline sharing among queues. |
598 |
> |
* Must be a power of two; at least 4, but should be larger to |
599 |
> |
* reduce or eliminate cacheline sharing among queues. |
600 |
> |
* Currently, it is much larger, as a partial workaround for |
601 |
> |
* the fact that JVMs often place arrays in locations that |
602 |
> |
* share GC bookkeeping (especially cardmarks) such that |
603 |
> |
* per-write accesses encounter serious memory contention. |
604 |
|
*/ |
605 |
< |
static final int INITIAL_QUEUE_CAPACITY = 1 << 8; |
605 |
> |
static final int INITIAL_QUEUE_CAPACITY = 1 << 13; |
606 |
|
|
607 |
|
/** |
608 |
|
* Maximum size for queue arrays. Must be a power of two less |
626 |
|
volatile int base; // index of next slot for poll |
627 |
|
int top; // index of next slot for push |
628 |
|
ForkJoinTask<?>[] array; // the elements (initially unallocated) |
629 |
+ |
final ForkJoinPool pool; // the containing pool (may be null) |
630 |
|
final ForkJoinWorkerThread owner; // owning thread or null if shared |
631 |
|
volatile Thread parker; // == owner during call to park; else null |
632 |
< |
ForkJoinTask<?> currentJoin; // task being joined in awaitJoin |
632 |
> |
volatile ForkJoinTask<?> currentJoin; // task being joined in awaitJoin |
633 |
|
ForkJoinTask<?> currentSteal; // current non-local task being executed |
634 |
|
// Heuristic padding to ameliorate unfortunate memory placements |
635 |
< |
Object p00, p01, p02, p03, p04, p05, p06, p07, p08, p09, p0a; |
635 |
> |
Object p00, p01, p02, p03, p04, p05, p06, p07; |
636 |
> |
Object p08, p09, p0a, p0b, p0c, p0d, p0e; |
637 |
|
|
638 |
< |
WorkQueue(ForkJoinWorkerThread owner, int mode) { |
726 |
< |
this.owner = owner; |
638 |
> |
WorkQueue(ForkJoinPool pool, ForkJoinWorkerThread owner, int mode) { |
639 |
|
this.mode = mode; |
640 |
+ |
this.pool = pool; |
641 |
+ |
this.owner = owner; |
642 |
|
// Place indices in the center of array (that is not yet allocated) |
643 |
|
base = top = INITIAL_QUEUE_CAPACITY >>> 1; |
644 |
|
} |
645 |
|
|
646 |
|
/** |
647 |
< |
* Returns number of tasks in the queue |
647 |
> |
* Returns the approximate number of tasks in the queue. |
648 |
|
*/ |
649 |
|
final int queueSize() { |
650 |
< |
int n = base - top; // non-owner callers must read base first |
651 |
< |
return (n >= 0) ? 0 : -n; |
650 |
> |
int n = base - top; // non-owner callers must read base first |
651 |
> |
return (n >= 0) ? 0 : -n; // ignore transient negative |
652 |
> |
} |
653 |
> |
|
654 |
> |
/** |
655 |
> |
* Provides a more accurate estimate of whether this queue has |
656 |
> |
* any tasks than does queueSize, by checking whether a |
657 |
> |
* near-empty queue has at least one unclaimed task. |
658 |
> |
*/ |
659 |
> |
final boolean isEmpty() { |
660 |
> |
ForkJoinTask<?>[] a; int m, s; |
661 |
> |
int n = base - (s = top); |
662 |
> |
return (n >= 0 || |
663 |
> |
(n == -1 && |
664 |
> |
((a = array) == null || |
665 |
> |
(m = a.length - 1) < 0 || |
666 |
> |
U.getObjectVolatile |
667 |
> |
(a, ((m & (s - 1)) << ASHIFT) + ABASE) == null))); |
668 |
|
} |
669 |
|
|
670 |
|
/** |
671 |
|
* Pushes a task. Call only by owner in unshared queues. |
672 |
|
* |
673 |
|
* @param task the task. Caller must ensure non-null. |
674 |
< |
* @param p, if non-null, pool to signal if necessary |
745 |
< |
* @throw RejectedExecutionException if array cannot |
746 |
< |
* be resized |
674 |
> |
* @throws RejectedExecutionException if array cannot be resized |
675 |
|
*/ |
676 |
< |
final void push(ForkJoinTask<?> task, ForkJoinPool p) { |
677 |
< |
ForkJoinTask<?>[] a; |
676 |
> |
final void push(ForkJoinTask<?> task) { |
677 |
> |
ForkJoinTask<?>[] a; ForkJoinPool p; |
678 |
|
int s = top, m, n; |
679 |
|
if ((a = array) != null) { // ignore if queue removed |
680 |
|
U.putOrderedObject |
681 |
|
(a, (((m = a.length - 1) & s) << ASHIFT) + ABASE, task); |
682 |
|
if ((n = (top = s + 1) - base) <= 2) { |
683 |
< |
if (p != null) |
683 |
> |
if ((p = pool) != null) |
684 |
|
p.signalWork(); |
685 |
|
} |
686 |
|
else if (n >= m) |
699 |
|
boolean submitted = false; |
700 |
|
if (runState == 0 && U.compareAndSwapInt(this, RUNSTATE, 0, 1)) { |
701 |
|
ForkJoinTask<?>[] a = array; |
702 |
< |
int s = top, n = s - base; |
702 |
> |
int s = top; |
703 |
|
try { |
704 |
< |
if ((a != null && n < a.length - 1) || |
704 |
> |
if ((a != null && a.length > s + 1 - base) || |
705 |
|
(a = growArray(false)) != null) { // must presize |
706 |
|
int j = (((a.length - 1) & s) << ASHIFT) + ABASE; |
707 |
|
U.putObject(a, (long)j, task); // don't need "ordered" |
716 |
|
} |
717 |
|
|
718 |
|
/** |
719 |
< |
* Takes next task, if one exists, in FIFO order. |
719 |
> |
* Takes next task, if one exists, in LIFO order. Call only |
720 |
> |
* by owner in unshared queues. (We do not have a shared |
721 |
> |
* version of this method because it is never needed.) |
722 |
|
*/ |
723 |
< |
final ForkJoinTask<?> poll() { |
724 |
< |
ForkJoinTask<?>[] a; int b, i; |
725 |
< |
while ((b = base) - top < 0 && (a = array) != null && |
726 |
< |
(i = (a.length - 1) & b) >= 0) { |
727 |
< |
int j = (i << ASHIFT) + ABASE; |
728 |
< |
ForkJoinTask<?> t = (ForkJoinTask<?>)U.getObjectVolatile(a, j); |
729 |
< |
if (t != null && base == b && |
723 |
> |
final ForkJoinTask<?> pop() { |
724 |
> |
ForkJoinTask<?>[] a; ForkJoinTask<?> t; int m; |
725 |
> |
if ((a = array) != null && (m = a.length - 1) >= 0) { |
726 |
> |
for (int s; (s = top - 1) - base >= 0;) { |
727 |
> |
long j = ((m & s) << ASHIFT) + ABASE; |
728 |
> |
if ((t = (ForkJoinTask<?>)U.getObject(a, j)) == null) |
729 |
> |
break; |
730 |
> |
if (U.compareAndSwapObject(a, j, t, null)) { |
731 |
> |
top = s; |
732 |
> |
return t; |
733 |
> |
} |
734 |
> |
} |
735 |
> |
} |
736 |
> |
return null; |
737 |
> |
} |
738 |
> |
|
739 |
> |
/** |
740 |
> |
* Takes a task in FIFO order if b is base of queue and a task |
741 |
> |
* can be claimed without contention. Specialized versions |
742 |
> |
* appear in ForkJoinPool methods scan and tryHelpStealer. |
743 |
> |
*/ |
744 |
> |
final ForkJoinTask<?> pollAt(int b) { |
745 |
> |
ForkJoinTask<?> t; ForkJoinTask<?>[] a; |
746 |
> |
if ((a = array) != null) { |
747 |
> |
int j = (((a.length - 1) & b) << ASHIFT) + ABASE; |
748 |
> |
if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) != null && |
749 |
> |
base == b && |
750 |
|
U.compareAndSwapObject(a, j, t, null)) { |
751 |
|
base = b + 1; |
752 |
|
return t; |
756 |
|
} |
757 |
|
|
758 |
|
/** |
759 |
< |
* Takes next task, if one exists, in LIFO order. |
810 |
< |
* Call only by owner in unshared queues. |
759 |
> |
* Takes next task, if one exists, in FIFO order. |
760 |
|
*/ |
761 |
< |
final ForkJoinTask<?> pop() { |
762 |
< |
ForkJoinTask<?> t; int m; |
763 |
< |
ForkJoinTask<?>[] a = array; |
764 |
< |
if (a != null && (m = a.length - 1) >= 0) { |
765 |
< |
for (int s; (s = top - 1) - base >= 0;) { |
766 |
< |
int j = ((m & s) << ASHIFT) + ABASE; |
767 |
< |
if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) == null) |
768 |
< |
break; |
769 |
< |
if (U.compareAndSwapObject(a, j, t, null)) { |
821 |
< |
top = s; |
761 |
> |
final ForkJoinTask<?> poll() { |
762 |
> |
ForkJoinTask<?>[] a; int b; ForkJoinTask<?> t; |
763 |
> |
while ((b = base) - top < 0 && (a = array) != null) { |
764 |
> |
int j = (((a.length - 1) & b) << ASHIFT) + ABASE; |
765 |
> |
t = (ForkJoinTask<?>)U.getObjectVolatile(a, j); |
766 |
> |
if (t != null) { |
767 |
> |
if (base == b && |
768 |
> |
U.compareAndSwapObject(a, j, t, null)) { |
769 |
> |
base = b + 1; |
770 |
|
return t; |
771 |
|
} |
772 |
|
} |
773 |
+ |
else if (base == b) { |
774 |
+ |
if (b + 1 == top) |
775 |
+ |
break; |
776 |
+ |
Thread.yield(); // wait for lagging update |
777 |
+ |
} |
778 |
|
} |
779 |
|
return null; |
780 |
|
} |
799 |
|
} |
800 |
|
|
801 |
|
/** |
849 |
– |
* Returns task at index b if b is current base of queue. |
850 |
– |
*/ |
851 |
– |
final ForkJoinTask<?> pollAt(int b) { |
852 |
– |
ForkJoinTask<?>[] a; int i; |
853 |
– |
ForkJoinTask<?> task = null; |
854 |
– |
if ((a = array) != null && (i = ((a.length - 1) & b)) >= 0) { |
855 |
– |
int j = (i << ASHIFT) + ABASE; |
856 |
– |
ForkJoinTask<?> t = (ForkJoinTask<?>)U.getObjectVolatile(a, j); |
857 |
– |
if (t != null && base == b && |
858 |
– |
U.compareAndSwapObject(a, j, t, null)) { |
859 |
– |
base = b + 1; |
860 |
– |
task = t; |
861 |
– |
} |
862 |
– |
} |
863 |
– |
return task; |
864 |
– |
} |
865 |
– |
|
866 |
– |
/** |
802 |
|
* Pops the given task only if it is at the current top. |
803 |
|
*/ |
804 |
|
final boolean tryUnpush(ForkJoinTask<?> t) { |
816 |
|
* Polls the given task only if it is at the current base. |
817 |
|
*/ |
818 |
|
final boolean pollFor(ForkJoinTask<?> task) { |
819 |
< |
ForkJoinTask<?>[] a; int b, i; |
820 |
< |
if ((b = base) - top < 0 && (a = array) != null && |
821 |
< |
(i = (a.length - 1) & b) >= 0) { |
887 |
< |
int j = (i << ASHIFT) + ABASE; |
819 |
> |
ForkJoinTask<?>[] a; int b; |
820 |
> |
if ((b = base) - top < 0 && (a = array) != null) { |
821 |
> |
int j = (((a.length - 1) & b) << ASHIFT) + ABASE; |
822 |
|
if (U.getObjectVolatile(a, j) == task && base == b && |
823 |
|
U.compareAndSwapObject(a, j, task, null)) { |
824 |
|
base = b + 1; |
829 |
|
} |
830 |
|
|
831 |
|
/** |
898 |
– |
* If present, removes from queue and executes the given task, or |
899 |
– |
* any other cancelled task. Returns (true) immediately on any CAS |
900 |
– |
* or consistency check failure so caller can retry. |
901 |
– |
* |
902 |
– |
* @return false if no progress can be made |
903 |
– |
*/ |
904 |
– |
final boolean tryRemoveAndExec(ForkJoinTask<?> task) { |
905 |
– |
boolean removed = false, empty = true, progress = true; |
906 |
– |
ForkJoinTask<?>[] a; int m, s, b, n; |
907 |
– |
if ((a = array) != null && (m = a.length - 1) >= 0 && |
908 |
– |
(n = (s = top) - (b = base)) > 0) { |
909 |
– |
for (ForkJoinTask<?> t;;) { // traverse from s to b |
910 |
– |
int j = ((--s & m) << ASHIFT) + ABASE; |
911 |
– |
t = (ForkJoinTask<?>)U.getObjectVolatile(a, j); |
912 |
– |
if (t == null) // inconsistent length |
913 |
– |
break; |
914 |
– |
else if (t == task) { |
915 |
– |
if (s + 1 == top) { // pop |
916 |
– |
if (!U.compareAndSwapObject(a, j, task, null)) |
917 |
– |
break; |
918 |
– |
top = s; |
919 |
– |
removed = true; |
920 |
– |
} |
921 |
– |
else if (base == b) // replace with proxy |
922 |
– |
removed = U.compareAndSwapObject(a, j, task, |
923 |
– |
new EmptyTask()); |
924 |
– |
break; |
925 |
– |
} |
926 |
– |
else if (t.status >= 0) |
927 |
– |
empty = false; |
928 |
– |
else if (s + 1 == top) { // pop and throw away |
929 |
– |
if (U.compareAndSwapObject(a, j, t, null)) |
930 |
– |
top = s; |
931 |
– |
break; |
932 |
– |
} |
933 |
– |
if (--n == 0) { |
934 |
– |
if (!empty && base == b) |
935 |
– |
progress = false; |
936 |
– |
break; |
937 |
– |
} |
938 |
– |
} |
939 |
– |
} |
940 |
– |
if (removed) |
941 |
– |
task.doExec(); |
942 |
– |
return progress; |
943 |
– |
} |
944 |
– |
|
945 |
– |
/** |
832 |
|
* Initializes or doubles the capacity of array. Call either |
833 |
|
* by owner or with lock held -- it is OK for base, but not |
834 |
|
* top, to move while resizings are in progress. |
864 |
|
} |
865 |
|
|
866 |
|
/** |
867 |
< |
* Removes and cancels all known tasks, ignoring any exceptions |
867 |
> |
* Removes and cancels all known tasks, ignoring any exceptions. |
868 |
|
*/ |
869 |
|
final void cancelAll() { |
870 |
|
ForkJoinTask.cancelIgnoringExceptions(currentJoin); |
873 |
|
ForkJoinTask.cancelIgnoringExceptions(t); |
874 |
|
} |
875 |
|
|
876 |
+ |
/** |
877 |
+ |
* Computes next value for random probes. Scans don't require |
878 |
+ |
* a very high quality generator, but also not a crummy one. |
879 |
+ |
* Marsaglia xor-shift is cheap and works well enough. Note: |
880 |
+ |
* This is manually inlined in its usages in ForkJoinPool to |
881 |
+ |
* avoid writes inside busy scan loops. |
882 |
+ |
*/ |
883 |
+ |
final int nextSeed() { |
884 |
+ |
int r = seed; |
885 |
+ |
r ^= r << 13; |
886 |
+ |
r ^= r >>> 17; |
887 |
+ |
return seed = r ^= r << 5; |
888 |
+ |
} |
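
// Sketch of the inlined form used in scan loops: the seed stays in
// a local so there is no per-iteration field write; ws and m (a
// queue array and its power-of-two mask) are illustrative here.
final void inlinedSeedSketch(WorkQueue[] ws, int m) {
    int r = seed;
    for (int tries = 16; tries > 0; --tries) {
        r ^= r << 13; r ^= r >>> 17; r ^= r << 5;  // same xorshift steps
        WorkQueue q = ws[(r >>> 16) & m];          // hypothetical victim pick
        // ... attempt steal from q ...
    }
    seed = r;                                      // single write-back
}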
889 |
+ |
|
890 |
|
// Execution methods |
891 |
|
|
892 |
|
/** |
893 |
< |
* Removes and runs tasks until empty, using local mode |
994 |
< |
* ordering. |
893 |
> |
* Pops and runs tasks until empty. |
894 |
|
*/ |
895 |
< |
final void runLocalTasks() { |
896 |
< |
if (base - top < 0) { |
897 |
< |
for (ForkJoinTask<?> t; (t = nextLocalTask()) != null; ) |
895 |
> |
private void popAndExecAll() { |
896 |
> |
// A bit faster than repeated pop calls |
897 |
> |
ForkJoinTask<?>[] a; int m, s; long j; ForkJoinTask<?> t; |
898 |
> |
while ((a = array) != null && (m = a.length - 1) >= 0 && |
899 |
> |
(s = top - 1) - base >= 0 && |
900 |
> |
(t = ((ForkJoinTask<?>) |
901 |
> |
U.getObject(a, j = ((m & s) << ASHIFT) + ABASE))) |
902 |
> |
!= null) { |
903 |
> |
if (U.compareAndSwapObject(a, j, t, null)) { |
904 |
> |
top = s; |
905 |
|
t.doExec(); |
906 |
+ |
} |
907 |
+ |
} |
908 |
+ |
} |
909 |
+ |
|
910 |
+ |
/** |
911 |
+ |
* Polls and runs tasks until empty. |
912 |
+ |
*/ |
913 |
+ |
private void pollAndExecAll() { |
914 |
+ |
for (ForkJoinTask<?> t; (t = poll()) != null;) |
915 |
+ |
t.doExec(); |
916 |
+ |
} |
917 |
+ |
|
918 |
+ |
/** |
919 |
+ |
* If present, removes from queue and executes the given task, or |
920 |
+ |
* any other cancelled task. Returns immediately on any CAS |
921 |
+ |
* or consistency check failure so caller can retry. |
922 |
+ |
* |
923 |
+ |
* @return 0 if no progress can be made, else positive |
924 |
+ |
* (this unusual convention simplifies use with tryHelpStealer.) |
925 |
+ |
*/ |
926 |
+ |
final int tryRemoveAndExec(ForkJoinTask<?> task) { |
927 |
+ |
int stat = 1; |
928 |
+ |
boolean removed = false, empty = true; |
929 |
+ |
ForkJoinTask<?>[] a; int m, s, b, n; |
930 |
+ |
if ((a = array) != null && (m = a.length - 1) >= 0 && |
931 |
+ |
(n = (s = top) - (b = base)) > 0) { |
932 |
+ |
for (ForkJoinTask<?> t;;) { // traverse from s to b |
933 |
+ |
int j = ((--s & m) << ASHIFT) + ABASE; |
934 |
+ |
t = (ForkJoinTask<?>)U.getObjectVolatile(a, j); |
935 |
+ |
if (t == null) // inconsistent length |
936 |
+ |
break; |
937 |
+ |
else if (t == task) { |
938 |
+ |
if (s + 1 == top) { // pop |
939 |
+ |
if (!U.compareAndSwapObject(a, j, task, null)) |
940 |
+ |
break; |
941 |
+ |
top = s; |
942 |
+ |
removed = true; |
943 |
+ |
} |
944 |
+ |
else if (base == b) // replace with proxy |
945 |
+ |
removed = U.compareAndSwapObject(a, j, task, |
946 |
+ |
new EmptyTask()); |
947 |
+ |
break; |
948 |
+ |
} |
949 |
+ |
else if (t.status >= 0) |
950 |
+ |
empty = false; |
951 |
+ |
else if (s + 1 == top) { // pop and throw away |
952 |
+ |
if (U.compareAndSwapObject(a, j, t, null)) |
953 |
+ |
top = s; |
954 |
+ |
break; |
955 |
+ |
} |
956 |
+ |
if (--n == 0) { |
957 |
+ |
if (!empty && base == b) |
958 |
+ |
stat = 0; |
959 |
+ |
break; |
960 |
+ |
} |
961 |
+ |
} |
962 |
|
} |
963 |
+ |
if (removed) |
964 |
+ |
task.doExec(); |
965 |
+ |
return stat; |
966 |
|
} |
967 |
|
|
968 |
|
/** |
969 |
|
* Executes a top-level task and any local tasks remaining |
970 |
|
* after execution. |
1006 |
– |
* |
1007 |
– |
* @return true unless terminating |
971 |
|
*/ |
972 |
< |
final boolean runTask(ForkJoinTask<?> t) { |
1010 |
< |
boolean alive = true; |
972 |
> |
final void runTask(ForkJoinTask<?> t) { |
973 |
|
if (t != null) { |
974 |
|
currentSteal = t; |
975 |
|
t.doExec(); |
976 |
< |
runLocalTasks(); |
976 |
> |
if (top != base) { // process remaining local tasks |
977 |
> |
if (mode == 0) |
978 |
> |
popAndExecAll(); |
979 |
> |
else |
980 |
> |
pollAndExecAll(); |
981 |
> |
} |
982 |
|
++nsteals; |
983 |
|
currentSteal = null; |
984 |
|
} |
1018 |
– |
else if (runState < 0) // terminating |
1019 |
– |
alive = false; |
1020 |
– |
return alive; |
985 |
|
} |
986 |
|
|
987 |
|
/** |
988 |
< |
* Executes a non-top-level (stolen) task |
988 |
> |
* Executes a non-top-level (stolen) task. |
989 |
|
*/ |
990 |
|
final void runSubtask(ForkJoinTask<?> t) { |
991 |
|
if (t != null) { |
997 |
|
} |
998 |
|
|
999 |
|
/** |
1000 |
< |
* Computes next value for random probes. Scans don't require |
1037 |
< |
* a very high quality generator, but also not a crummy one. |
1038 |
< |
* Marsaglia xor-shift is cheap and works well enough. Note: |
1039 |
< |
* This is manually inlined in several usages in ForkJoinPool |
1040 |
< |
* to avoid writes inside busy scan loops. |
1000 |
> |
* Returns true if owned and not known to be blocked. |
1001 |
|
*/ |
1002 |
< |
final int nextSeed() { |
1003 |
< |
int r = seed; |
1004 |
< |
r ^= r << 13; |
1005 |
< |
r ^= r >>> 17; |
1006 |
< |
r ^= r << 5; |
1007 |
< |
return seed = r; |
1002 |
> |
final boolean isApparentlyUnblocked() { |
1003 |
> |
Thread wt; Thread.State s; |
1004 |
> |
return (eventCount >= 0 && |
1005 |
> |
(wt = owner) != null && |
1006 |
> |
(s = wt.getState()) != Thread.State.BLOCKED && |
1007 |
> |
s != Thread.State.WAITING && |
1008 |
> |
s != Thread.State.TIMED_WAITING); |
1009 |
> |
} |
1010 |
> |
|
1011 |
> |
/** |
1012 |
> |
* If this queue is owned and its owner is not already |
1013 |
> |
* interrupted, try to interrupt and/or unpark, ignoring exceptions. |
1014 |
> |
*/ |
1015 |
> |
final void interruptOwner() { |
1016 |
> |
Thread wt, p; |
1017 |
> |
if ((wt = owner) != null && !wt.isInterrupted()) { |
1018 |
> |
try { |
1019 |
> |
wt.interrupt(); |
1020 |
> |
} catch (SecurityException ignore) { |
1021 |
> |
} |
1022 |
> |
} |
1023 |
> |
if ((p = parker) != null) |
1024 |
> |
U.unpark(p); |
1025 |
|
} |
1026 |
|
|
1027 |
|
// Unsafe mechanics |
1049 |
|
} |
1050 |
|
|
1051 |
|
/** |
1052 |
< |
* Class for artificial tasks that are used to replace the target |
1053 |
< |
* of local joins if they are removed from an interior queue slot |
1054 |
< |
* in WorkQueue.tryRemoveAndExec. We don't need the proxy to |
1055 |
< |
* actually do anything beyond having a unique identity. |
1056 |
< |
*/ |
1057 |
< |
static final class EmptyTask extends ForkJoinTask<Void> { |
1058 |
< |
EmptyTask() { status = ForkJoinTask.NORMAL; } // force done |
1059 |
< |
public Void getRawResult() { return null; } |
1060 |
< |
public void setRawResult(Void x) {} |
1061 |
< |
public boolean exec() { return true; } |
1052 |
> |
* Per-thread records for threads that submit to pools. Currently |
1053 |
> |
* holds only a pseudo-random seed / index that is used to choose |
1054 |
> |
* submission queues in method doSubmit. In the future, this may |
1055 |
> |
* also incorporate a means to implement different task rejection |
1056 |
> |
* and resubmission policies. |
1057 |
> |
* |
1058 |
> |
* Seeds for submitters and workers/workQueues work in basically |
1059 |
> |
* the same way but are initialized and updated using slightly |
1060 |
> |
* different mechanics. Both are initialized using the same |
1061 |
> |
* approach as in class ThreadLocal, where successive values are |
1062 |
> |
* unlikely to collide with previous values. This is done during |
1063 |
> |
* registration for workers, but requires a separate AtomicInteger |
1064 |
> |
* for submitters. Seeds are then randomly modified upon |
1065 |
> |
* collisions using xorshifts, which requires a non-zero seed. |
1066 |
> |
*/ |
1067 |
> |
static final class Submitter { |
1068 |
> |
int seed; |
1069 |
> |
Submitter() { |
1070 |
> |
int s = nextSubmitterSeed.getAndAdd(SEED_INCREMENT); |
1071 |
> |
seed = (s == 0) ? 1 : s; // ensure non-zero |
1072 |
> |
} |
1073 |
|
} |
1074 |
|
|
1075 |
+ |
/** ThreadLocal class for Submitters */ |
1076 |
+ |
static final class ThreadSubmitter extends ThreadLocal<Submitter> { |
1077 |
+ |
public Submitter initialValue() { return new Submitter(); } |
1078 |
+ |
} |
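
// Sketch of how these seeds choose a submission queue (illustrative
// helper only; the real logic lives in doSubmit, and SQMASK is among
// the constants declared below). An even-valued index picks a shared
// submission queue; on contention the seed is rescrambled with the
// same xorshift as WorkQueue.nextSeed, moving the submitter elsewhere.
static int submitterQueueIndexSketch(Submitter s, WorkQueue[] ws) {
    return s.seed & (ws.length - 1) & SQMASK;      // SQMASK keeps it even
}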
1079 |
+ |
|
1080 |
+ |
// static fields (initialized in static initializer below) |
1081 |
+ |
|
1082 |
|
/** |
1083 |
< |
* Computes a hash code for the given thread. This method is |
1084 |
< |
* expected to provide higher-quality hash codes than those using |
1090 |
< |
* method hashCode(). |
1083 |
> |
* Creates a new ForkJoinWorkerThread. This factory is used unless |
1084 |
> |
* overridden in ForkJoinPool constructors. |
1085 |
|
*/ |
1086 |
< |
static final int hashThread(Thread t) { |
1087 |
< |
long id = (t == null) ? 0L : t.getId(); // Use MurmurHash of thread id |
1094 |
< |
int h = (int)id ^ (int)(id >>> 32); |
1095 |
< |
h ^= h >>> 16; |
1096 |
< |
h *= 0x85ebca6b; |
1097 |
< |
h ^= h >>> 13; |
1098 |
< |
h *= 0xc2b2ae35; |
1099 |
< |
return h ^ (h >>> 16); |
1100 |
< |
} |
1086 |
> |
public static final ForkJoinWorkerThreadFactory |
1087 |
> |
defaultForkJoinWorkerThreadFactory; |
1088 |
|
|
1089 |
|
/** |
1090 |
< |
* Top-level runloop for workers |
1090 |
> |
* Generator for assigning sequence numbers as pool names. |
1091 |
|
*/ |
1092 |
< |
final void runWorker(ForkJoinWorkerThread wt) { |
1106 |
< |
WorkQueue w = wt.workQueue; |
1107 |
< |
w.growArray(false); // Initialize queue array and seed in this thread |
1108 |
< |
w.seed = hashThread(Thread.currentThread()) | (1 << 31); // force < 0 |
1092 |
> |
private static final AtomicInteger poolNumberGenerator; |
1093 |
|
|
1094 |
< |
do {} while (w.runTask(scan(w))); |
1095 |
< |
} |
1094 |
> |
/** |
1095 |
> |
* Generator for initial hashes/seeds for submitters. Accessed by |
1096 |
> |
* Submitter class constructor. |
1097 |
> |
*/ |
1098 |
> |
static final AtomicInteger nextSubmitterSeed; |
1099 |
> |
|
1100 |
> |
/** |
1101 |
> |
* Permission required for callers of methods that may start or |
1102 |
> |
* kill threads. |
1103 |
> |
*/ |
1104 |
> |
private static final RuntimePermission modifyThreadPermission; |
1105 |
> |
|
1106 |
> |
/** |
1107 |
> |
* Per-thread submission bookkeeping. Shared across all pools |
1108 |
> |
* to reduce ThreadLocal pollution and because random motion |
1109 |
> |
* to avoid contention in one pool is likely to hold for others. |
1110 |
> |
*/ |
1111 |
> |
private static final ThreadSubmitter submitters; |
1112 |
> |
|
1113 |
> |
// static constants |
1114 |
> |
|
1115 |
> |
/** |
1116 |
> |
* The wakeup interval (in nanoseconds) for a worker waiting for a |
1117 |
> |
* task when the pool is quiescent to instead try to shrink the |
1118 |
> |
* number of workers. The exact value does not matter too |
1119 |
> |
* much. It must be short enough to release resources during |
1120 |
> |
* sustained periods of idleness, but not so short that threads |
1121 |
> |
* are continually re-created. |
1122 |
> |
*/ |
1123 |
> |
private static final long SHRINK_RATE = |
1124 |
> |
4L * 1000L * 1000L * 1000L; // 4 seconds |
1125 |
> |
|
1126 |
> |
/** |
1127 |
> |
* The timeout value for attempted shrinkage, includes |
1128 |
> |
* some slop to cope with system timer imprecision. |
1129 |
> |
*/ |
1130 |
> |
private static final long SHRINK_TIMEOUT = SHRINK_RATE - (SHRINK_RATE / 10); |
1131 |
> |
|
1132 |
> |
/** |
1133 |
> |
* The maximum stolen->joining link depth allowed in method |
1134 |
> |
* tryHelpStealer. Must be a power of two. This value also |
1135 |
> |
* controls the maximum number of times to try to help join a task |
1136 |
> |
* without any apparent progress or change in pool state before |
1137 |
> |
* giving up and blocking (see awaitJoin). Depths for legitimate |
1138 |
> |
* chains are unbounded, but we use a fixed constant to avoid |
1139 |
> |
* (otherwise unchecked) cycles and to bound staleness of |
1140 |
> |
* traversal parameters at the expense of sometimes blocking when |
1141 |
> |
* we could be helping. |
1142 |
> |
*/ |
1143 |
> |
private static final int MAX_HELP = 64; |
1144 |
> |
|
1145 |
> |
/** |
1146 |
> |
* Secondary time-based bound (in nanosecs) for helping attempts |
1147 |
> |
* before trying compensated blocking in awaitJoin. Used in |
1148 |
> |
* conjunction with MAX_HELP to reduce variance due to different |
1149 |
> |
* polling rates associated with different helping options. The |
1150 |
> |
* value should roughly approximate the time required to create |
1151 |
> |
* and/or activate a worker thread. |
1152 |
> |
*/ |
1153 |
> |
private static final long COMPENSATION_DELAY = 1L << 18; // ~0.25 millisec |
1154 |
> |
|
1155 |
> |
/** |
1156 |
> |
* Increment for seed generators. See class ThreadLocal for |
1157 |
> |
* explanation. |
1158 |
> |
*/ |
1159 |
> |
private static final int SEED_INCREMENT = 0x61c88647; |
1160 >
1161 >     /**
1162 >      * Bits and masks for control variables
1163 >      *
1164 >      * Field ctl is a long packed with:
1165 >      * AC: Number of active running workers minus target parallelism (16 bits)
1166 >      * TC: Number of total workers minus target parallelism (16 bits)
1167 >      * ST: true if pool is terminating (1 bit)
1168 >      * EC: the wait count of top waiting thread (15 bits)
1169 >      * ID: poolIndex of top of Treiber stack of waiters (16 bits)
1170 >      *
1171 >      * When convenient, we can extract the upper 32 bits of counts and
1172 >      * the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e =
1173 >      * (int)ctl. The ec field is never accessed alone, but always
1174 >      * together with id and st. The offsets of counts by the target
1175 >      * parallelism and the positionings of fields make it possible to
1176 >      * perform the most common checks via sign tests of fields: When
1177 >      * ac is negative, there are not enough active workers, when tc is
1178 >      * negative, there are not enough total workers, and when e is
1179 >      * negative, the pool is terminating. To deal with these possibly
1180 >      * negative fields, we use casts in and out of "short" and/or
1181 >      * signed shifts to maintain signedness.
1182 >      *
1183 >      * When a thread is queued (inactivated), its eventCount field is
1184 >      * set negative, which is the only way to tell if a worker is
1185 >      * prevented from executing tasks, even though it must continue to
1186 >      * scan for them to avoid queuing races. Note however that
1187 >      * eventCount updates lag releases so usage requires care.
1188 >      *
1189 >      * Field runState is an int packed with:
1190 >      * SHUTDOWN: true if shutdown is enabled (1 bit)
1191 >      * SEQ: a sequence number updated upon (de)registering workers (30 bits)
1192 >      * INIT: set true after workQueues array construction (1 bit)
1193 >      *
1194 >      * The sequence number enables simple consistency checks:
1195 >      * Staleness of read-only operations on the workQueues array can
1196 >      * be checked by comparing runState before vs after the reads.
1197 >      */
1198 >
1199 >     // bit positions/shifts for fields
1200 >     private static final int  AC_SHIFT   = 48;
1201 >     private static final int  TC_SHIFT   = 32;
1202 >     private static final int  ST_SHIFT   = 31;
1203 >     private static final int  EC_SHIFT   = 16;
1204 >
1205 >     // bounds
1206 >     private static final int  SMASK      = 0xffff;  // short bits
1207 >     private static final int  MAX_CAP    = 0x7fff;  // max #workers - 1
1208 >     private static final int  SQMASK     = 0xfffe;  // even short bits
1209 >     private static final int  SHORT_SIGN = 1 << 15;
1210 >     private static final int  INT_SIGN   = 1 << 31;
1211 >
1212 >     // masks
1213 >     private static final long STOP_BIT   = 0x0001L << ST_SHIFT;
1214 >     private static final long AC_MASK    = ((long)SMASK) << AC_SHIFT;
1215 >     private static final long TC_MASK    = ((long)SMASK) << TC_SHIFT;
1216 >
1217 >     // units for incrementing and decrementing
1218 >     private static final long TC_UNIT    = 1L << TC_SHIFT;
1219 >     private static final long AC_UNIT    = 1L << AC_SHIFT;
1220 >
1221 >     // masks and units for dealing with u = (int)(ctl >>> 32)
1222 >     private static final int  UAC_SHIFT  = AC_SHIFT - 32;
1223 >     private static final int  UTC_SHIFT  = TC_SHIFT - 32;
1224 >     private static final int  UAC_MASK   = SMASK << UAC_SHIFT;
1225 >     private static final int  UTC_MASK   = SMASK << UTC_SHIFT;
1226 >     private static final int  UAC_UNIT   = 1 << UAC_SHIFT;
1227 >     private static final int  UTC_UNIT   = 1 << UTC_SHIFT;
1228 >
1229 >     // masks and units for dealing with e = (int)ctl
1230 >     private static final int  E_MASK     = 0x7fffffff; // no STOP_BIT
1231 >     private static final int  E_SEQ      = 1 << EC_SHIFT;
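[Editorial illustration, not part of the revision under review: a minimal sketch of the ctl packing described above, seeding ctl the way the constructor does and decoding the signed AC/TC subfields with the same shift-and-cast tricks. Names mirror the constants above; the class name is hypothetical.]

    public class CtlDemo {
        static final int  AC_SHIFT = 48, TC_SHIFT = 32;
        static final int  SMASK    = 0xffff;
        static final long AC_MASK  = ((long)SMASK) << AC_SHIFT;
        static final long TC_MASK  = ((long)SMASK) << TC_SHIFT;
        public static void main(String[] args) {
            int parallelism = 4;
            long np = (long)(-parallelism);  // offset counts, as the constructor does
            long ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
            int ac = (int)(ctl >> AC_SHIFT);    // signed shift: active - parallelism
            int tc = (short)(ctl >>> TC_SHIFT); // short cast restores the sign
            int e  = (int)ctl;                  // ST/EC/ID half
            // Prints ac=-4 tc=-4 e=0: both sign tests report "too few workers".
            System.out.println("ac=" + ac + " tc=" + tc + " e=" + e);
        }
    }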
1232
1233 <     // Creating, registering and deregistering workers
1233 >     // runState bits
1234 >     private static final int SHUTDOWN    = 1 << 31;
1235 >
1236 >     // access mode for WorkQueue
1237 >     static final int LIFO_QUEUE          =  0;
1238 >     static final int FIFO_QUEUE          =  1;
1239 >     static final int SHARED_QUEUE        = -1;
1240 >
1241 >     // Instance fields
1242 >
1243 >     /*
1244 >      * Field layout order in this class tends to matter more than one
1245 >      * would like. Runtime layout order is only loosely related to
1246 >      * declaration order and may differ across JVMs, but the following
1247 >      * empirically works OK on current JVMs.
1248 >      */
1249 >
1250 >     volatile long ctl;                         // main pool control
1251 >     final int parallelism;                     // parallelism level
1252 >     final int localMode;                       // per-worker scheduling mode
1253 >     final int submitMask;                      // submit queue index bound
1254 >     int nextSeed;                              // for initializing worker seeds
1255 >     volatile int runState;                     // shutdown status and seq
1256 >     WorkQueue[] workQueues;                    // main registry
1257 >     final Mutex lock;                          // for registration
1258 >     final Condition termination;               // for awaitTermination
1259 >     final ForkJoinWorkerThreadFactory factory; // factory for new workers
1260 >     final Thread.UncaughtExceptionHandler ueh; // per-worker UEH
1261 >     final AtomicLong stealCount;               // collect counts when terminated
1262 >     final AtomicInteger nextWorkerNumber;      // to create worker name string
1263 >     final String workerNamePrefix;             // to create worker name string
1264 >
1265 >     // Creating, registering, and deregistering workers
1266
1267       /**
1268        * Tries to create and start a worker
1269        */
1270       private void addWorker() {
1271           Throwable ex = null;
1272 <         ForkJoinWorkerThread w = null;
1272 >         ForkJoinWorkerThread wt = null;
1273           try {
1274 <             if ((w = factory.newThread(this)) != null) {
1275 <                 w.start();
1274 >             if ((wt = factory.newThread(this)) != null) {
1275 >                 wt.start();
1276                   return;
1277               }
1278           } catch (Throwable e) {
1279               ex = e;
1280           }
1281 <         deregisterWorker(w, ex);
1281 >         deregisterWorker(wt, ex); // adjust counts etc on failure
1282       }
1283
1284       /**
1293       }
1294
1295       /**
1296 <      * Callback from ForkJoinWorkerThread constructor to establish and
1297 <      * record its WorkQueue
1296 >      * Callback from ForkJoinWorkerThread constructor to establish its
1297 >      * poolIndex and record its WorkQueue. To avoid scanning bias due
1298 >      * to packing entries in front of the workQueues array, we treat
1299 >      * the array as a simple power-of-two hash table using per-thread
1300 >      * seed as hash, expanding as needed.
1301        *
1302 <      * @param wt the worker thread
1302 >      * @param w the worker's queue
1303        */
1304 <     final void registerWorker(ForkJoinWorkerThread wt) {
1305 <         WorkQueue w = wt.workQueue;
1306 <         ReentrantLock lock = this.lock;
1304 >
1305 >     final void registerWorker(WorkQueue w) {
1306 >         Mutex lock = this.lock;
1307           lock.lock();
1308           try {
1154 -             int k = nextPoolIndex;
1309               WorkQueue[] ws = workQueues;
1310 <             if (ws != null) {                // ignore on shutdown
1311 <                 int n = ws.length;
1312 <                 if (k < 0 || (k & 1) == 0 || k >= n || ws[k] != null) {
1313 <                     for (k = 1; k < n && ws[k] != null; k += 2)
1314 <                         ;                    // workers are at odd indices
1315 <                     if (k >= n)              // resize
1316 <                         workQueues = ws = Arrays.copyOf(ws, n << 1);
1317 <                 }
1318 <                 w.poolIndex = k;
1319 <                 w.eventCount = ~(k >>> 1) & SMASK; // Set up wait count
1320 <                 ws[k] = w;                   // record worker
1321 <                 nextPoolIndex = k + 2;
1322 <                 int rs = runState;
1323 <                 int m = rs & SMASK;          // recalculate runState mask
1324 <                 if (k > m)
1325 <                     m = (m << 1) + 1;
1326 <                 runState = (rs & SHUTDOWN) | ((rs + RS_SEQ) & RS_SEQ_MASK) | m;
1310 >             if (w != null && ws != null) {          // skip on shutdown/failure
1311 >                 int rs, n = ws.length, m = n - 1;
1312 >                 int s = nextSeed += SEED_INCREMENT; // rarely-colliding sequence
1313 >                 w.seed = (s == 0) ? 1 : s;          // ensure non-zero seed
1314 >                 int r = (s << 1) | 1;               // use odd-numbered indices
1315 >                 if (ws[r &= m] != null) {           // collision
1316 >                     int probes = 0;                 // step by approx half size
1317 >                     int step = (n <= 4) ? 2 : ((n >>> 1) & SQMASK) + 2;
1318 >                     while (ws[r = (r + step) & m] != null) {
1319 >                         if (++probes >= n) {
1320 >                             workQueues = ws = Arrays.copyOf(ws, n <<= 1);
1321 >                             m = n - 1;
1322 >                             probes = 0;
1323 >                         }
1324 >                     }
1325 >                 }
1326 >                 w.eventCount = w.poolIndex = r;     // establish before recording
1327 >                 ws[r] = w;                          // also update seq
1328 >                 runState = ((rs = runState) & SHUTDOWN) | ((rs + 2) & ~SHUTDOWN);
1329               }
1330           } finally {
1331               lock.unlock();
1333       }
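[Editorial illustration, not part of the revision under review: a simplified sketch of the odd-index probing that registerWorker uses. An odd starting slot plus an even step keeps every probe at an odd index, so workers never land on the even submission-queue slots; the step computation is simplified here relative to the SQMASK form above, and all names are hypothetical.]

    public class OddIndexProbeDemo {
        public static void main(String[] args) {
            Object[] ws = new Object[16];
            int m = ws.length - 1;
            int[] seeds = {1, 5, 9, 13};               // stand-ins for worker seeds
            for (int s : seeds) {
                int r = ((s << 1) | 1) & m;            // odd-numbered start index
                int step = ((ws.length >>> 1) & ~1) + 2; // even step, approx half size
                while (ws[r] != null)                  // probe on collision
                    r = (r + step) & m;                // odd + even stays odd
                ws[r] = "worker(seed=" + s + ")";
                System.out.println("seed " + s + " -> slot " + r);
            }
        }
    }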
1334
1335       /**
1336 <      * Final callback from terminating worker, as well as failure to
1337 <      * construct or start a worker in addWorker. Removes record of
1336 >      * Final callback from terminating worker, as well as upon failure
1337 >      * to construct or start a worker in addWorker. Removes record of
1338        * worker from array, and adjusts counts. If pool is shutting
1339        * down, tries to complete termination.
1340        *
1342        * @param ex the exception causing failure, or null if none
1343        */
1344       final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) {
1345 +         Mutex lock = this.lock;
1346           WorkQueue w = null;
1347           if (wt != null && (w = wt.workQueue) != null) {
1348               w.runState = -1;                // ensure runState is set
1349               stealCount.getAndAdd(w.totalSteals + w.nsteals);
1350               int idx = w.poolIndex;
1194 -             ReentrantLock lock = this.lock;
1351               lock.lock();
1352               try {                           // remove record from array
1353                   WorkQueue[] ws = workQueues;
1354                   if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w)
1355 <                     ws[nextPoolIndex = idx] = null;
1355 >                     ws[idx] = null;
1356               } finally {
1357                   lock.unlock();
1358               }
1364                                      ((c - TC_UNIT) & TC_MASK) |
1365                                      (c & ~(AC_MASK|TC_MASK)))));
1366
1367 <         if (!tryTerminate(false) && w != null) {
1367 >         if (!tryTerminate(false, false) && w != null) {
1368               w.cancelAll();                  // cancel remaining tasks
1369               if (w.array != null)            // suppress signal if never ran
1370                   signalWork();               // wake up or create replacement
1371 +             if (ex == null)                 // help clean refs on way out
1372 +                 ForkJoinTask.helpExpungeStaleExceptions();
1373           }
1374
1375           if (ex != null)                     // rethrow
1377       }
1378
1379
1380 <     // Maintaining ctl counts
1223 <
1224 <     /**
1225 <      * Increments active count; mainly called upon return from blocking
1226 <      */
1227 <     final void incrementActiveCount() {
1228 <         long c;
1229 <         do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT));
1230 <     }
1380 >     // Submissions
1381
1382       /**
1383 <      * Activates or creates a worker
1383 >      * Unless shutting down, adds the given task to a submission queue
1384 >      * at submitter's current queue index (modulo submission
1385 >      * range). If no queue exists at the index, one is created. If
1386 >      * the queue is busy, another index is randomly chosen. The
1387 >      * submitMask bounds the effective number of queues to the
1388 >      * (nearest power of two for) parallelism level.
1389 >      *
1390 >      * @param task the task. Caller must ensure non-null.
1391        */
1392 <     final void signalWork() {
1393 <         /*
1394 <          * The while condition is true if: (there is are too few total
1395 <          * workers OR there is at least one waiter) AND (there are too
1396 <          * few active workers OR the pool is terminating). The value
1397 <          * of e distinguishes the remaining cases: zero (no waiters)
1398 <          * for create, negative if terminating (in which case do
1399 <          * nothing), else release a waiter. The secondary checks for
1400 <          * release (non-null array etc) can fail if the pool begins
1401 <          * terminating after the test, and don't impose any added cost
1402 <          * because JVMs must perform null and bounds checks anyway.
1403 <          */
1404 <         long c; int e, u;
1405 <         while ((((e = (int)(c = ctl)) | (u = (int)(c >>> 32))) &
1406 <                 (INT_SIGN|SHORT_SIGN)) == (INT_SIGN|SHORT_SIGN)) {
1407 <             WorkQueue[] ws = workQueues; int i; WorkQueue w; Thread p;
1408 <             if (e == 0) {                        // add a new worker
1409 <                 if (U.compareAndSwapLong
1410 <                     (this, CTL, c, (long)(((u + UTC_UNIT) & UTC_MASK) |
1254 <                                           ((u + UAC_UNIT) & UAC_MASK)) << 32)) {
1255 <                     addWorker();
1256 <                     break;
1392 >     private void doSubmit(ForkJoinTask<?> task) {
1393 >         Submitter s = submitters.get();
1394 >         for (int r = s.seed, m = submitMask;;) {
1395 >             WorkQueue[] ws; WorkQueue q;
1396 >             int k = r & m & SQMASK;              // use only even indices
1397 >             if (runState < 0 || (ws = workQueues) == null || ws.length <= k)
1398 >                 throw new RejectedExecutionException(); // shutting down
1399 >             else if ((q = ws[k]) == null) {      // create new queue
1400 >                 WorkQueue nq = new WorkQueue(this, null, SHARED_QUEUE);
1401 >                 Mutex lock = this.lock;          // construct outside lock
1402 >                 lock.lock();
1403 >                 try {                            // recheck under lock
1404 >                     int rs = runState;           // to update seq
1405 >                     if (ws == workQueues && ws[k] == null) {
1406 >                         ws[k] = nq;
1407 >                         runState = ((rs & SHUTDOWN) | ((rs + 2) & ~SHUTDOWN));
1408 >                     }
1409 >                 } finally {
1410 >                     lock.unlock();
1411                   }
1412               }
1413 <             else if (e > 0 && ws != null &&
1414 <                      (i = ((~e << 1) | 1) & SMASK) < ws.length &&
1415 <                      (w = ws[i]) != null &&
1416 <                      w.eventCount == (e | INT_SIGN)) {
1417 <                 if (U.compareAndSwapLong
1418 <                     (this, CTL, c, (((long)(w.nextWait & E_MASK)) |
1419 <                                     ((long)(u + UAC_UNIT) << 32)))) {
1420 <                     w.eventCount = (e + E_SEQ) & E_MASK;
1267 <                     if ((p = w.parker) != null)
1268 <                         U.unpark(p);             // release a waiting worker
1269 <                     break;
1270 <                 }
1413 >             else if (q.trySharedPush(task)) {
1414 >                 signalWork();
1415 >                 return;
1416 >             }
1417 >             else if (m > 1) {                    // move to a different index
1418 >                 r ^= r << 13;                    // same xorshift as WorkQueues
1419 >                 r ^= r >>> 17;
1420 >                 s.seed = r ^= r << 5;
1421               }
1422               else
1423 <                 break;
1423 >                 Thread.yield();                  // yield if no alternatives
1424           }
1425       }
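[Editorial illustration, not part of the revision under review: the 13/17/5 update used here (and in the WorkQueue scans) is Marsaglia's xorshift generator; any non-zero 32-bit seed cycles through all 2^32 - 1 non-zero values, which is why a single int per submitter suffices as a random index source. A standalone sketch:]

    public class XorShiftDemo {
        public static void main(String[] args) {
            int r = 1;                     // any non-zero seed works
            for (int i = 0; i < 5; ++i) {
                r ^= r << 13;              // same three-step update as the pool
                r ^= r >>> 17;
                r ^= r << 5;
                System.out.println("next index bits: " + Integer.toHexString(r));
            }
        }
    }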
1426
1427 +     // Maintaining ctl counts
1428 +
1429       /**
1430 <      * Tries to decrement active count (sometimes implicitly) and
1279 <      * possibly release or create a compensating worker in preparation
1280 <      * for blocking. Fails on contention or termination.
1281 <      *
1282 <      * @return true if the caller can block, else should recheck and retry
1430 >      * Increments active count; mainly called upon return from blocking.
1431        */
1432 <     final boolean tryCompensate() {
1433 <         WorkQueue[] ws; WorkQueue w; Thread p;
1434 <         int pc = parallelism, e, u, ac, tc, i;
1287 <         long c = ctl;
1288 <
1289 <         if ((e = (int)c) >= 0) {
1290 <             if ((ac = ((u = (int)(c >>> 32)) >> UAC_SHIFT)) <= 0 &&
1291 <                 e != 0 && (ws = workQueues) != null &&
1292 <                 (i = ((~e << 1) | 1) & SMASK) < ws.length &&
1293 <                 (w = ws[i]) != null) {
1294 <                 if (w.eventCount == (e | INT_SIGN) &&
1295 <                     U.compareAndSwapLong
1296 <                     (this, CTL, c, ((long)(w.nextWait & E_MASK) |
1297 <                                     (c & (AC_MASK|TC_MASK))))) {
1298 <                     w.eventCount = (e + E_SEQ) & E_MASK;
1299 <                     if ((p = w.parker) != null)
1300 <                         U.unpark(p);
1301 <                     return true;                 // release an idle worker
1302 <                 }
1303 <             }
1304 <             else if ((tc = (short)(u >>> UTC_SHIFT)) >= 0 && ac + pc > 1) {
1305 <                 long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK);
1306 <                 if (U.compareAndSwapLong(this, CTL, c, nc))
1307 <                     return true;                 // no compensation needed
1308 <             }
1309 <             else if (tc + pc < MAX_ID) {
1310 <                 long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK);
1311 <                 if (U.compareAndSwapLong(this, CTL, c, nc)) {
1312 <                     addWorker();
1313 <                     return true;                 // create replacement
1314 <                 }
1315 <             }
1316 <         }
1317 <         return false;
1432 >     final void incrementActiveCount() {
1433 >         long c;
1434 >         do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT));
1435       }
1436
1320 -     // Submissions
1321 -
1437       /**
1438 <      * Unless shutting down, adds the given task to some submission
1324 <      * queue; using a randomly chosen queue index if the caller is a
1325 <      * ForkJoinWorkerThread, else one based on caller thread's hash
1326 <      * code. If no queue exists at the index, one is created. If the
1327 <      * queue is busy, another is chosen by sweeping through the queues
1328 <      * array.
1438 >      * Tries to activate or create a worker if too few are active.
1439        */
1440 <     private void doSubmit(ForkJoinTask<?> task) {
1441 <         if (task == null)
1442 <             throw new NullPointerException();
1443 <         Thread t = Thread.currentThread();
1444 <         int r = ((t instanceof ForkJoinWorkerThread) ?
1445 <                  ((ForkJoinWorkerThread)t).workQueue.nextSeed() : hashThread(t));
1446 <         for (;;) {
1447 <             int rs = runState, m = rs & SMASK;
1448 <             int j = r &= (m & ~1);               // even numbered queues
1449 <             WorkQueue[] ws = workQueues;
1450 <             if (rs < 0 || ws == null)
1451 <                 throw new RejectedExecutionException(); // shutting down
1452 <             if (ws.length > m) {                 // consistency check
1343 <                 for (WorkQueue q;;) {            // circular sweep
1344 <                     if (((q = ws[j]) != null ||
1345 <                          (q = tryAddSharedQueue(j)) != null) &&
1346 <                         q.trySharedPush(task)) {
1347 <                         signalWork();
1348 <                         return;
1349 <                     }
1350 <                     if ((j = (j + 2) & m) == r) {
1351 <                         Thread.yield();          // all queues busy
1440 >     final void signalWork() {
1441 >         long c; int u;
1442 >         while ((u = (int)((c = ctl) >>> 32)) < 0) {     // too few active
1443 >             WorkQueue[] ws = workQueues; int e, i; WorkQueue w; Thread p;
1444 >             if ((e = (int)c) > 0) {                     // at least one waiting
1445 >                 if (ws != null && (i = e & SMASK) < ws.length &&
1446 >                     (w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) {
1447 >                     long nc = (((long)(w.nextWait & E_MASK)) |
1448 >                                ((long)(u + UAC_UNIT) << 32));
1449 >                     if (U.compareAndSwapLong(this, CTL, c, nc)) {
1450 >                         w.eventCount = (e + E_SEQ) & E_MASK;
1451 >                         if ((p = w.parker) != null)
1452 >                             U.unpark(p);                // activate and release
1453                           break;
1454                       }
1455                   }
1456 +                 else
1457 +                     break;
1458               }
1459 <         }
1460 <     }
1461 <
1462 <     /**
1463 <      * Tries to add and register a new queue at the given index.
1464 <      *
1362 <      * @param idx the workQueues array index to register the queue
1363 <      * @return the queue, or null if could not add because could
1364 <      * not acquire lock or idx is unusable
1365 <      */
1366 <     private WorkQueue tryAddSharedQueue(int idx) {
1367 <         WorkQueue q = null;
1368 <         ReentrantLock lock = this.lock;
1369 <         if (idx >= 0 && (idx & 1) == 0 && !lock.isLocked()) {
1370 <             // create queue outside of lock but only if apparently free
1371 <             WorkQueue nq = new WorkQueue(null, SHARED_QUEUE);
1372 <             if (lock.tryLock()) {
1373 <                 try {
1374 <                     WorkQueue[] ws = workQueues;
1375 <                     if (ws != null && idx < ws.length) {
1376 <                         if ((q = ws[idx]) == null) {
1377 <                             int rs;              // update runState seq
1378 <                             ws[idx] = q = nq;
1379 <                             runState = (((rs = runState) & SHUTDOWN) |
1380 <                                         ((rs + RS_SEQ) & ~SHUTDOWN));
1381 <                         }
1382 <                     }
1383 <                 } finally {
1384 <                     lock.unlock();
1459 >             else if (e == 0 && (u & SHORT_SIGN) != 0) { // too few total
1460 >                 long nc = (long)(((u + UTC_UNIT) & UTC_MASK) |
1461 >                                  ((u + UAC_UNIT) & UAC_MASK)) << 32;
1462 >                 if (U.compareAndSwapLong(this, CTL, c, nc)) {
1463 >                     addWorker();
1464 >                     break;
1465                   }
1466               }
1467 +             else
1468 +                 break;
1469           }
1388 -         return q;
1470       }
1471
1472       // Scanning for tasks
1473
1474       /**
1475 +      * Top-level runloop for workers, called by ForkJoinWorkerThread.run.
1476 +      */
1477 +     final void runWorker(WorkQueue w) {
1478 +         w.growArray(false);         // initialize queue array in this thread
1479 +         do { w.runTask(scan(w)); } while (w.runState >= 0);
1480 +     }
1481 +
1482 +     /**
1483        * Scans for and, if found, returns one task, else possibly
1484        * inactivates the worker. This method operates on single reads of
1485 <      * volatile state and is designed to be re-invoked continuously in
1486 <      * part because it returns upon detecting inconsistencies,
1485 >      * volatile state and is designed to be re-invoked continuously,
1486 >      * in part because it returns upon detecting inconsistencies,
1487        * contention, or state changes that indicate possible success on
1488        * re-invocation.
1489        *
1490 <      * The scan searches for tasks across queues, randomly selecting
1491 <      * the first #queues probes, favoring steals 2:1 over submissions
1492 <      * (by exploiting even/odd indexing), and then performing a
1493 <      * circular sweep of all queues. The scan terminates upon either
1494 <      * finding a non-empty queue, or completing a full sweep. If the
1495 <      * worker is not inactivated, it takes and returns a task from
1496 <      * this queue. On failure to find a task, we take one of the
1497 <      * following actions, after which the caller will retry calling
1498 <      * this method unless terminated.
1490 >      * The scan searches for tasks across a random permutation of
1491 >      * queues (starting at a random index and stepping by a random
1492 >      * relative prime, checking each at least once). The scan
1493 >      * terminates upon either finding a non-empty queue, or completing
1494 >      * the sweep. If the worker is not inactivated, it takes and
1495 >      * returns a task from this queue. On failure to find a task, we
1496 >      * take one of the following actions, after which the caller will
1497 >      * retry calling this method unless terminated.
1498 >      *
1499 >      * * If pool is terminating, terminate the worker.
1500        *
1501        * * If not a complete sweep, try to release a waiting worker. If
1502        * the scan terminated because the worker is inactivated, then the
1505        * another worker, but with same net effect. Releasing in other
1506        * cases as well ensures that we have enough workers running.
1507        *
1418 -      * * If the caller has run a task since the the last empty scan,
1419 -      * return (to allow rescan) if other workers are not also yet
1420 -      * enqueued. Field WorkQueue.rescans counts down on each scan to
1421 -      * ensure eventual inactivation, and occasional calls to
1422 -      * Thread.yield to help avoid interference with more useful
1423 -      * activities on the system.
1424 -      *
1425 -      * * If pool is terminating, terminate the worker
1426 -      *
1508        * * If not already enqueued, try to inactivate and enqueue the
1509 <      * worker on wait queue.
1509 >      * worker on wait queue. Or, if inactivating has caused the pool
1510 >      * to be quiescent, relay to idleAwaitWork to check for
1511 >      * termination and possibly shrink pool.
1512 >      *
1513 >      * * If already inactive, and the caller has run a task since the
1514 >      * last empty scan, return (to allow rescan) unless others are
1515 >      * also inactivated. Field WorkQueue.rescans counts down on each
1516 >      * scan to ensure eventual inactivation and blocking.
1517        *
1518 <      * * If already enqueued and none of the above apply, either park
1519 <      * awaiting signal, or if this is the most recent waiter and pool
1432 <      * is quiescent, relay to idleAwaitWork to check for termination
1433 <      * and possibly shrink pool.
1518 >      * * If already enqueued and none of the above apply, park
1519 >      * awaiting signal.
1520        *
1521        * @param w the worker (via its WorkQueue)
1522        * @return a task or null if none found
1523        */
1524       private final ForkJoinTask<?> scan(WorkQueue w) {
1525 <         boolean swept = false;                   // true after full empty scan
1526 <         WorkQueue[] ws;                          // volatile read order matters
1527 <         int r = w.seed, ec = w.eventCount;       // ec is negative if inactive
1528 <         int rs = runState, m = rs & SMASK;
1529 <         if ((ws = workQueues) != null && ws.length > m) {
1530 <             ForkJoinTask<?> task = null;
1531 <             for (int k = 0, j = -2 - m; ; ++j) {
1532 <                 WorkQueue q; int b;
1533 <                 if (j < 0) {                     // random probes while j negative
1534 <                     r ^= r << 13; r ^= r >>> 17; k = (r ^= r << 5) | (j & 1);
1535 <                 }                                // worker (not submit) for odd j
1536 <                 else                             // cyclic scan when j >= 0
1537 <                     k += (m >>> 1) | 1;          // step by half to reduce bias
1538 <
1539 <                 if ((q = ws[k & m]) != null && (b = q.base) - q.top < 0) {
1540 <                     if (ec >= 0)
1541 <                         task = q.pollAt(b);      // steal
1542 <                     break;
1525 >         WorkQueue[] ws;                          // first update random seed
1526 >         int r = w.seed; r ^= r << 13; r ^= r >>> 17; w.seed = r ^= r << 5;
1527 >         int rs = runState, m;                    // volatile read order matters
1528 >         if ((ws = workQueues) != null && (m = ws.length - 1) > 0) {
1529 >             int ec = w.eventCount;               // ec is negative if inactive
1530 >             int step = (r >>> 16) | 1;           // relative prime
1531 >             for (int j = (m + 1) << 2; ; r += step) {
1532 >                 WorkQueue q; ForkJoinTask<?> t; ForkJoinTask<?>[] a; int b;
1533 >                 if ((q = ws[r & m]) != null && (b = q.base) - q.top < 0 &&
1534 >                     (a = q.array) != null) {     // probably nonempty
1535 >                     int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
1536 >                     t = (ForkJoinTask<?>)U.getObjectVolatile(a, i);
1537 >                     if (q.base == b && ec >= 0 && t != null &&
1538 >                         U.compareAndSwapObject(a, i, t, null)) {
1539 >                         if (q.top - (q.base = b + 1) > 1)
1540 >                             signalWork();        // help pushes signal
1541 >                         return t;
1542 >                     }
1543 >                     else if (ec < 0 || j <= m) {
1544 >                         rs = 0;                  // mark scan as incomplete
1545 >                         break;                   // caller can retry after release
1546 >                     }
1547                   }
1548 <                 else if (j > m) {
1459 <                     if (rs == runState)          // staleness check
1460 <                         swept = true;
1548 >                 if (--j < 0)
1549                       break;
1550 +             }
1551 +
1552 +             long c = ctl; int e = (int)c, a = (int)(c >> AC_SHIFT), nr, ns;
1553 +             if (e < 0)                           // decode ctl on empty scan
1554 +                 w.runState = -1;                 // pool is terminating
1555 +             else if (rs == 0 || rs != runState) { // incomplete scan
1556 +                 WorkQueue v; Thread p;           // try to release a waiter
1557 +                 if (e > 0 && a < 0 && w.eventCount == ec &&
1558 +                     (v = ws[e & m]) != null && v.eventCount == (e | INT_SIGN)) {
1559 +                     long nc = ((long)(v.nextWait & E_MASK) |
1560 +                                ((c + AC_UNIT) & (AC_MASK|TC_MASK)));
1561 +                     if (ctl == c && U.compareAndSwapLong(this, CTL, c, nc)) {
1562 +                         v.eventCount = (e + E_SEQ) & E_MASK;
1563 +                         if ((p = v.parker) != null)
1564 +                             U.unpark(p);
1565 +                     }
1566                   }
1567               }
1568 <             w.seed = r;                          // save seed for next scan
1569 <             if (task != null)
1570 <                 return task;
1571 <         }
1572 <
1573 <         // Decode ctl on empty scan
1574 <         long c = ctl; int e = (int)c, a = (int)(c >> AC_SHIFT), nr, ns;
1575 <         if (!swept) {                            // try to release a waiter
1576 <             WorkQueue v; Thread p;
1577 <             if (e > 0 && a < 0 && ws != null &&
1578 <                 (v = ws[((~e << 1) | 1) & m]) != null &&
1579 <                 v.eventCount == (e | INT_SIGN) && U.compareAndSwapLong
1580 <                 (this, CTL, c, ((long)(v.nextWait & E_MASK) |
1581 <                                 ((c + AC_UNIT) & (AC_MASK|TC_MASK))))) {
1582 <                 v.eventCount = (e + E_SEQ) & E_MASK;
1583 <                 if ((p = v.parker) != null)
1584 <                     U.unpark(p);
1585 <             }
1586 <         }
1587 <         else if ((nr = w.rescans) > 0) {         // continue rescanning
1588 <             int ac = a + parallelism;
1589 <             if ((w.rescans = (ac < nr) ? ac : nr - 1) > 0 && w.seed < 0 &&
1590 <                 w.eventCount == ec)
1591 <                 Thread.yield();                  // 1 bit randomness for yield call
1592 <         }
1593 <         else if (e < 0)                          // pool is terminating
1594 <             w.runState = -1;
1595 <         else if (ec >= 0) {                      // try to enqueue
1596 <             long nc = (long)ec | ((c - AC_UNIT) & (AC_MASK|TC_MASK));
1597 <             w.nextWait = e;
1598 <             w.eventCount = ec | INT_SIGN;        // mark as inactive
1599 <             if (!U.compareAndSwapLong(this, CTL, c, nc))
1496 <                 w.eventCount = ec;               // back out on CAS failure
1497 <             else if ((ns = w.nsteals) != 0) {    // set rescans if ran task
1498 <                 if (a <= 0)                      // ... unless too many active
1499 <                     w.rescans = a + parallelism;
1500 <                 w.nsteals = 0;
1501 <                 w.totalSteals += ns;
1502 <             }
1503 <         }
1504 <         else {                                   // already queued
1505 <             if (parallelism == -a)
1506 <                 idleAwaitWork(w);                // quiescent
1507 <             if (w.eventCount == ec) {
1508 <                 Thread.interrupted();            // clear status
1509 <                 ForkJoinWorkerThread wt = w.owner;
1510 <                 U.putObject(wt, PARKBLOCKER, this);
1511 <                 w.parker = wt;                   // emulate LockSupport.park
1512 <                 if (w.eventCount == ec)          // recheck
1513 <                     U.park(false, 0L);           // block
1514 <                 w.parker = null;
1515 <                 U.putObject(wt, PARKBLOCKER, null);
1568 >             else if (ec >= 0) {                  // try to enqueue/inactivate
1569 >                 long nc = (long)ec | ((c - AC_UNIT) & (AC_MASK|TC_MASK));
1570 >                 w.nextWait = e;
1571 >                 w.eventCount = ec | INT_SIGN;    // mark as inactive
1572 >                 if (ctl != c || !U.compareAndSwapLong(this, CTL, c, nc))
1573 >                     w.eventCount = ec;           // unmark on CAS failure
1574 >                 else {
1575 >                     if ((ns = w.nsteals) != 0) {
1576 >                         w.nsteals = 0;           // set rescans if ran task
1577 >                         w.rescans = (a > 0) ? 0 : a + parallelism;
1578 >                         w.totalSteals += ns;
1579 >                     }
1580 >                     if (a == 1 - parallelism)    // quiescent
1581 >                         idleAwaitWork(w, nc, c);
1582 >                 }
1583 >             }
1584 >             else if (w.eventCount < 0) {         // already queued
1585 >                 if ((nr = w.rescans) > 0) {      // continue rescanning
1586 >                     int ac = a + parallelism;
1587 >                     if (((w.rescans = (ac < nr) ? ac : nr - 1) & 3) == 0)
1588 >                         Thread.yield();          // yield before block
1589 >                 }
1590 >                 else {
1591 >                     Thread.interrupted();        // clear status
1592 >                     Thread wt = Thread.currentThread();
1593 >                     U.putObject(wt, PARKBLOCKER, this);
1594 >                     w.parker = wt;               // emulate LockSupport.park
1595 >                     if (w.eventCount < 0)        // recheck
1596 >                         U.park(false, 0L);
1597 >                     w.parker = null;
1598 >                     U.putObject(wt, PARKBLOCKER, null);
1599 >                 }
1600               }
1601           }
1602           return null;
1603       }
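[Editorial illustration, not part of the revision under review: the "random permutation" claim in the scan comment rests on the step being odd. Since gcd(odd step, 2^k) = 1, the sequence r, r+step, r+2*step, ... taken modulo a power-of-two table size visits every slot exactly once per round. A standalone sketch:]

    public class PermutationScanDemo {
        public static void main(String[] args) {
            int m = 15;                            // table size 16, mask 15
            int r = 0xDEADBEEF;                    // arbitrary starting point
            int step = (r >>> 16) | 1;             // force an odd step, as in scan()
            java.util.Set<Integer> seen = new java.util.TreeSet<Integer>();
            for (int j = 0; j <= m; ++j, r += step)
                seen.add(r & m);
            // Prints "16 distinct slots": the walk is a full permutation.
            System.out.println(seen.size() + " distinct slots: " + seen);
        }
    }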
1604
1605       /**
1606 <      * If inactivating worker w has caused pool to become quiescent,
1607 <      * check for pool termination, and, so long as this is not the
1608 <      * only worker, wait for event for up to SHRINK_RATE nanosecs On
1609 <      * timeout, if ctl has not changed, terminate the worker, which
1610 <      * will in turn wake up another worker to possibly repeat this
1611 <      * process.
1606 >      * If inactivating worker w has caused the pool to become
1607 >      * quiescent, checks for pool termination, and, so long as this is
1608 >      * not the only worker, waits for event for up to SHRINK_RATE
1609 >      * nanosecs. On timeout, if ctl has not changed, terminates the
1610 >      * worker, which will in turn wake up another worker to possibly
1611 >      * repeat this process.
1612        *
1613        * @param w the calling worker
1614 +      * @param currentCtl the ctl value triggering possible quiescence
1615 +      * @param prevCtl the ctl value to restore if thread is terminated
1616        */
1617 <     private void idleAwaitWork(WorkQueue w) {
1618 <         long c; int nw, ec;
1619 <         if (!tryTerminate(false) &&
1620 <             (int)((c = ctl) >> AC_SHIFT) + parallelism == 0 &&
1621 <             (ec = w.eventCount) == ((int)c | INT_SIGN) &&
1622 <             (nw = w.nextWait) != 0) {
1537 <             long nc = ((long)(nw & E_MASK) |     // ctl to restore on timeout
1538 <                        ((c + AC_UNIT) & AC_MASK) | (c & TC_MASK));
1539 <             ForkJoinTask.helpExpungeStaleExceptions(); // help clean
1540 <             ForkJoinWorkerThread wt = w.owner;
1541 <             while (ctl == c) {
1617 >     private void idleAwaitWork(WorkQueue w, long currentCtl, long prevCtl) {
1618 >         if (w.eventCount < 0 && !tryTerminate(false, false) &&
1619 >             (int)prevCtl != 0 && !hasQueuedSubmissions() && ctl == currentCtl) {
1620 >             Thread wt = Thread.currentThread();
1621 >             Thread.yield();                      // yield before block
1622 >             while (ctl == currentCtl) {
1623                   long startTime = System.nanoTime();
1624                   Thread.interrupted();            // timed variant of version in scan()
1625                   U.putObject(wt, PARKBLOCKER, this);
1626                   w.parker = wt;
1627 <                 if (ctl == c)
1627 >                 if (ctl == currentCtl)
1628                       U.park(false, SHRINK_RATE);
1629                   w.parker = null;
1630                   U.putObject(wt, PARKBLOCKER, null);
1631 <                 if (ctl != c)
1631 >                 if (ctl != currentCtl)
1632                       break;
1633                   if (System.nanoTime() - startTime >= SHRINK_TIMEOUT &&
1634 <                     U.compareAndSwapLong(this, CTL, c, nc)) {
1635 <                     w.runState = -1;             // shrink
1636 <                     w.eventCount = (ec + E_SEQ) | E_MASK;
1634 >                     U.compareAndSwapLong(this, CTL, currentCtl, prevCtl)) {
1635 >                     w.eventCount = (w.eventCount + E_SEQ) | E_MASK;
1636 >                     w.runState = -1;             // shrink
1637                       break;
1638                   }
1639               }
1651        * leaves hints in workers to speed up subsequent calls. The
1652        * implementation is very branchy to cope with potential
1653        * inconsistencies or loops encountering chains that are stale,
1654 <      * unknown, or of length greater than MAX_HELP_DEPTH links. All
1574 <      * of these cases are dealt with by just retrying by caller.
1654 >      * unknown, or so long that they are likely cyclic.
1655        *
1656        * @param joiner the joining worker
1657        * @param task the task to join
1658 <      * @return true if found or ran a task (and so is immediately retryable)
1658 >      * @return 0 if no progress can be made, negative if task
1659 >      * known complete, else positive
1660        */
1661 <     final boolean tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) {
1662 <         ForkJoinTask<?> subtask;                 // current target
1663 <         boolean progress = false;
1664 <         int depth = 0;                           // current chain depth
1665 <         int m = runState & SMASK;
1666 <         WorkQueue[] ws = workQueues;
1667 <
1668 <         if (ws != null && ws.length > m && (subtask = task).status >= 0) {
1669 <             outer:for (WorkQueue j = joiner;;) {
1670 <                 // Try to find the stealer of subtask, by first using hint
1590 <                 WorkQueue stealer = null;
1591 <                 WorkQueue v = ws[j.stealHint & m];
1592 <                 if (v != null && v.currentSteal == subtask)
1593 <                     stealer = v;
1594 <                 else {
1595 <                     for (int i = 1; i <= m; i += 2) {
1596 <                         if ((v = ws[i]) != null && v.currentSteal == subtask) {
1597 <                             stealer = v;
1598 <                             j.stealHint = i;     // save hint
1599 <                             break;
1600 <                         }
1661 >     private int tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) {
1662 >         int stat = 0, steps = 0;                 // bound to avoid cycles
1663 >         if (joiner != null && task != null) {    // hoist null checks
1664 >             restart: for (;;) {
1665 >                 ForkJoinTask<?> subtask = task;  // current target
1666 >                 for (WorkQueue j = joiner, v;;) { // v is stealer of subtask
1667 >                     WorkQueue[] ws; int m, s, h;
1668 >                     if ((s = task.status) < 0) {
1669 >                         stat = s;
1670 >                         break restart;
1671                       }
1672 <                 if (stealer == null)
1673 <                     break;
1674 <             }
1675 <
1676 <             for (WorkQueue q = stealer;;) {      // Try to help stealer
1677 <                 ForkJoinTask<?> t; int b;
1678 <                 if (task.status < 0)
1679 <                     break outer;
1680 <                 if ((b = q.base) - q.top < 0) {
1681 <                     progress = true;
1682 <                     if (subtask.status < 0)
1683 <                         break outer;             // stale
1684 <                     if ((t = q.pollAt(b)) != null) {
1685 <                         stealer.stealHint = joiner.poolIndex;
1686 <                         joiner.runSubtask(t);
1672 >                     if ((ws = workQueues) == null || (m = ws.length - 1) <= 0)
1673 >                         break restart;           // shutting down
1674 >                     if ((v = ws[h = (j.stealHint | 1) & m]) == null ||
1675 >                         v.currentSteal != subtask) {
1676 >                         for (int origin = h;;) { // find stealer
1677 >                             if (((h = (h + 2) & m) & 15) == 1 &&
1678 >                                 (subtask.status < 0 || j.currentJoin != subtask))
1679 >                                 continue restart; // occasional staleness check
1680 >                             if ((v = ws[h]) != null &&
1681 >                                 v.currentSteal == subtask) {
1682 >                                 j.stealHint = h; // save hint
1683 >                                 break;
1684 >                             }
1685 >                             if (h == origin)
1686 >                                 break restart;   // cannot find stealer
1687                           }
1688                       }
1689 <                 else {   // empty - try to descend to find stealer's stealer
1690 <                     ForkJoinTask<?> next = stealer.currentJoin;
1691 <                     if (++depth == MAX_HELP_DEPTH || subtask.status < 0 ||
1692 <                         next == null || next == subtask)
1693 <                         break outer;  // max depth, stale, dead-end, cyclic
1694 <                     subtask = next;
1695 <                     j = stealer;
1696 <                     break;
1689 >                     for (;;) { // help stealer or descend to its stealer
1690 >                         ForkJoinTask[] a; int b;
1691 >                         if (subtask.status < 0)     // surround probes with
1692 >                             continue restart;       //   consistency checks
1693 >                         if ((b = v.base) - v.top < 0 && (a = v.array) != null) {
1694 >                             int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
1695 >                             ForkJoinTask<?> t =
1696 >                                 (ForkJoinTask<?>)U.getObjectVolatile(a, i);
1697 >                             if (subtask.status < 0 || j.currentJoin != subtask ||
1698 >                                 v.currentSteal != subtask)
1699 >                                 continue restart;   // stale
1700 >                             stat = 1;               // apparent progress
1701 >                             if (t != null && v.base == b &&
1702 >                                 U.compareAndSwapObject(a, i, t, null)) {
1703 >                                 v.base = b + 1;     // help stealer
1704 >                                 joiner.runSubtask(t);
1705 >                             }
1706 >                             else if (v.base == b && ++steps == MAX_HELP)
1707 >                                 break restart;      // v apparently stalled
1708 >                         }
1709 >                         else {                      // empty -- try to descend
1710 >                             ForkJoinTask<?> next = v.currentJoin;
1711 >                             if (subtask.status < 0 || j.currentJoin != subtask ||
1712 >                                 v.currentSteal != subtask)
1713 >                                 continue restart;   // stale
1714 >                             else if (next == null || ++steps == MAX_HELP)
1715 >                                 break restart;      // dead-end or maybe cyclic
1716 >                             else {
1717 >                                 subtask = next;
1718 >                                 j = v;
1719 >                                 break;
1720 >                             }
1721 >                         }
1722                       }
1723                   }
1724               }
1725           }
1726 <         return progress;
1726 >         return stat;
1727       }
1728
1729       /**
1732        * @param joiner the joining worker
1733        * @param task the task
1734        */
1735 <     final void tryPollForAndExec(WorkQueue joiner, ForkJoinTask<?> task) {
1735 >     private void tryPollForAndExec(WorkQueue joiner, ForkJoinTask<?> task) {
1736           WorkQueue[] ws;
1737 <         int m = runState & SMASK;
1738 <         if ((ws = workQueues) != null && ws.length > m) {
1644 <             for (int j = 1; j <= m && task.status >= 0; j += 2) {
1737 >         if ((ws = workQueues) != null) {
1738 >             for (int j = 1; j < ws.length && task.status >= 0; j += 2) {
1739                   WorkQueue q = ws[j];
1740                   if (q != null && q.pollFor(task)) {
1741                       joiner.runSubtask(task);
1746       }
1747
1748       /**
1749 <      * Returns a non-empty steal queue, if one is found during a random,
1750 <      * then cyclic scan, else null. This method must be retried by
1751 <      * caller if, by the time it tries to use the queue, it is empty.
1749 >      * Tries to decrement active count (sometimes implicitly) and
1750 >      * possibly release or create a compensating worker in preparation
1751 >      * for blocking. Fails on contention or termination. Otherwise,
1752 >      * adds a new thread if no idle workers are available and either
1753 >      * pool would become completely starved or: (at least half
1754 >      * starved, and fewer than 50% spares exist, and there is at least
1755 >      * one task apparently available). Even though the availability
1756 >      * check requires a full scan, it is worthwhile in reducing false
1757 >      * alarms.
1758 >      *
1759 >      * @param task if non-null, a task being waited for
1760 >      * @param blocker if non-null, a blocker being waited for
1761 >      * @return true if the caller can block, else should recheck and retry
1762 >      */
1763 >     final boolean tryCompensate(ForkJoinTask<?> task, ManagedBlocker blocker) {
1764 >         int pc = parallelism, e;
1765 >         long c = ctl;
1766 >         WorkQueue[] ws = workQueues;
1767 >         if ((e = (int)c) >= 0 && ws != null) {
1768 >             int u, a, ac, hc;
1769 >             int tc = (short)((u = (int)(c >>> 32)) >>> UTC_SHIFT) + pc;
1770 >             boolean replace = false;
1771 >             if ((a = u >> UAC_SHIFT) <= 0) {
1772 >                 if ((ac = a + pc) <= 1)
1773 >                     replace = true;
1774 >                 else if ((e > 0 || (task != null &&
1775 >                                     ac <= (hc = pc >>> 1) && tc < pc + hc))) {
1776 >                     WorkQueue w;
1777 >                     for (int j = 0; j < ws.length; ++j) {
1778 >                         if ((w = ws[j]) != null && !w.isEmpty()) {
1779 >                             replace = true;
1780 >                             break;   // in compensation range and tasks available
1781 >                         }
1782 >                     }
1783 >                 }
1784 >             }
1785 >             if ((task == null || task.status >= 0) && // recheck need to block
1786 >                 (blocker == null || !blocker.isReleasable()) && ctl == c) {
1787 >                 if (!replace) {          // no compensation
1788 >                     long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK);
1789 >                     if (U.compareAndSwapLong(this, CTL, c, nc))
1790 >                         return true;
1791 >                 }
1792 >                 else if (e != 0) {       // release an idle worker
1793 >                     WorkQueue w; Thread p; int i;
1794 >                     if ((i = e & SMASK) < ws.length && (w = ws[i]) != null) {
1795 >                         long nc = ((long)(w.nextWait & E_MASK) |
1796 >                                    (c & (AC_MASK|TC_MASK)));
1797 >                         if (w.eventCount == (e | INT_SIGN) &&
1798 >                             U.compareAndSwapLong(this, CTL, c, nc)) {
1799 >                             w.eventCount = (e + E_SEQ) & E_MASK;
1800 >                             if ((p = w.parker) != null)
1801 >                                 U.unpark(p);
1802 >                             return true;
1803 >                         }
1804 >                     }
1805 >                 }
1806 >                 else if (tc < MAX_CAP) { // create replacement
1807 >                     long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK);
1808 >                     if (U.compareAndSwapLong(this, CTL, c, nc)) {
1809 >                         addWorker();
1810 >                         return true;
1811 >                     }
1812 >                 }
1813 >             }
1814 >         }
1815 >         return false;
1816 >     }
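[Editorial aside: tryCompensate is the machinery behind the public ForkJoinPool.managedBlock(ManagedBlocker) API. For orientation, here is a typical client-side blocker, adapted from the ManagedLocker example in the ForkJoinPool class javadoc; isReleasable avoids blocking when the lock is free, and block is called only when the pool has arranged compensation:]

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.ForkJoinPool.ManagedBlocker;
    import java.util.concurrent.locks.ReentrantLock;

    class ManagedLocker implements ManagedBlocker {
        final ReentrantLock lock;
        boolean hasLock = false;
        ManagedLocker(ReentrantLock lock) { this.lock = lock; }
        public boolean block() {            // called when blocking is unavoidable
            if (!hasLock)
                lock.lock();
            return true;
        }
        public boolean isReleasable() {     // avoid blocking when possible
            return hasLock || (hasLock = lock.tryLock());
        }
    }
    // Usage from pool code: ForkJoinPool.managedBlock(new ManagedLocker(lock));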
1817 >
1818 >     /**
1819 >      * Helps and/or blocks until the given task is done.
1820 >      *
1821 >      * @param joiner the joining worker
1822 >      * @param task the task
1823 >      * @return task status on exit
1824 >      */
1825 >     final int awaitJoin(WorkQueue joiner, ForkJoinTask<?> task) {
1826 >         int s;
1827 >         if ((s = task.status) >= 0) {
1828 >             ForkJoinTask<?> prevJoin = joiner.currentJoin;
1829 >             joiner.currentJoin = task;
1830 >             long startTime = 0L;
1831 >             for (int k = 0;;) {
1832 >                 if ((s = (joiner.isEmpty() ?           // try to help
1833 >                           tryHelpStealer(joiner, task) :
1834 >                           joiner.tryRemoveAndExec(task))) == 0 &&
1835 >                     (s = task.status) >= 0) {
1836 >                     if (k == 0) {
1837 >                         startTime = System.nanoTime();
1838 >                         tryPollForAndExec(joiner, task); // check uncommon case
1839 >                     }
1840 >                     else if ((k & (MAX_HELP - 1)) == 0 &&
1841 >                              System.nanoTime() - startTime >=
1842 >                              COMPENSATION_DELAY &&
1843 >                              tryCompensate(task, null)) {
1844 >                         if (task.trySetSignal()) {
1845 >                             synchronized (task) {
1846 >                                 if (task.status >= 0) {
1847 >                                     try {                // see ForkJoinTask
1848 >                                         task.wait();     //  for explanation
1849 >                                     } catch (InterruptedException ie) {
1850 >                                     }
1851 >                                 }
1852 >                                 else
1853 >                                     task.notifyAll();
1854 >                             }
1855 >                         }
1856 >                         long c;                          // re-activate
1857 >                         do {} while (!U.compareAndSwapLong
1858 >                                      (this, CTL, c = ctl, c + AC_UNIT));
1859 >                     }
1860 >                 }
1861 >                 if (s < 0 || (s = task.status) < 0) {
1862 >                     joiner.currentJoin = prevJoin;
1863 >                     break;
1864 >                 }
1865 >                 else if ((k++ & (MAX_HELP - 1)) == MAX_HELP >>> 1)
1866 >                     Thread.yield();                      // for politeness
1867 >             }
1868 >         }
1869 >         return s;
1870 >     }
1871 >
1872 >     /**
1873 >      * Stripped-down variant of awaitJoin used by timed joins. Tries
1874 >      * to help join only while there is continuous progress. (Caller
1875 >      * will then enter a timed wait.)
1876 >      *
1877 >      * @param joiner the joining worker
1878 >      * @param task the task
1879 >      * @return task status on exit
1880 >      */
1881 >     final int helpJoinOnce(WorkQueue joiner, ForkJoinTask<?> task) {
1882 >         int s;
1883 >         while ((s = task.status) >= 0 &&
1884 >                (joiner.isEmpty() ?
1885 >                 tryHelpStealer(joiner, task) :
1886 >                 joiner.tryRemoveAndExec(task)) != 0)
1887 >             ;
1888 >         return s;
1889 >     }
1890 >
1891 >     /**
1892 >      * Returns a (probably) non-empty steal queue, if one is found
1893 >      * during a random, then cyclic scan, else null. This method must
1894 >      * be retried by caller if, by the time it tries to use the queue,
1895 >      * it is empty.
1896        */
1897       private WorkQueue findNonEmptyStealQueue(WorkQueue w) {
1898 <         int r = w.seed; // Same idea as scan(), but ignoring submissions
1898 >         // Similar to loop in scan(), but ignoring submissions
1899 >         int r = w.seed; r ^= r << 13; r ^= r >>> 17; w.seed = r ^= r << 5;
1900 >         int step = (r >>> 16) | 1;
1901           for (WorkQueue[] ws;;) {
1902 <             int m = runState & SMASK;
1903 <             if ((ws = workQueues) == null)
1902 >             int rs = runState, m;
1903 >             if ((ws = workQueues) == null || (m = ws.length - 1) < 1)
1904                   return null;
1905 <             if (ws.length > m) {
1906 <                 WorkQueue q;
1907 <                 for (int n = m << 2, k = r, j = -n;;) {
1908 <                     r ^= r << 13; r ^= r >>> 17; r ^= r << 5;
1909 <                     if ((q = ws[(k | 1) & m]) != null && q.base - q.top < 0) {
1910 <                         w.seed = r;
1671 <                         return q;
1672 <                     }
1673 <                     else if (j > n)
1905 >             for (int j = (m + 1) << 2; ; r += step) {
1906 >                 WorkQueue q = ws[((r << 1) | 1) & m];
1907 >                 if (q != null && !q.isEmpty())
1908 >                     return q;
1909 >                 else if (--j < 0) {
1910 >                     if (runState == rs)
1911                           return null;
1912 <                     else
1676 <                         k = (j++ < 0) ? r : k + ((m >>> 1) | 1);
1677 <
1912 >                     break;
1913                   }
1914               }
1915           }
1916       }
1917
1918 +
1919       /**
1920        * Runs tasks until {@code isQuiescent()}. We piggyback on
1921        * active count ctl maintenance, but rather than blocking
1924        */
1925       final void helpQuiescePool(WorkQueue w) {
1926           for (boolean active = true;;) {
1927 <             w.runLocalTasks();  // exhaust local queue
1927 >             ForkJoinTask<?> localTask; // exhaust local queue
1928 >             while ((localTask = w.nextLocalTask()) != null)
1929 >                 localTask.doExec();
1930               WorkQueue q = findNonEmptyStealQueue(w);
1931               if (q != null) {
1932 <                 ForkJoinTask<?> t;
1932 >                 ForkJoinTask<?> t; int b;
1933                   if (!active) {      // re-establish active count
1934                       long c;
1935                       active = true;
1936                       do {} while (!U.compareAndSwapLong
1937                                    (this, CTL, c = ctl, c + AC_UNIT));
1938                   }
1939 <                 if ((t = q.poll()) != null)
1939 >                 if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null)
1940                       w.runSubtask(t);
1941               }
1942               else {
1958       }
1959
1960       /**
1961 <      * Gets and removes a local or stolen task for the given worker
1961 >      * Gets and removes a local or stolen task for the given worker.
1962        *
1963        * @return a task, if available
1964        */
1965       final ForkJoinTask<?> nextTaskFor(WorkQueue w) {
1966           for (ForkJoinTask<?> t;;) {
1967 <             WorkQueue q;
1967 >             WorkQueue q; int b;
1968               if ((t = w.nextLocalTask()) != null)
1969                   return t;
1970               if ((q = findNonEmptyStealQueue(w)) == null)
1971                   return null;
1972 <             if ((t = q.poll()) != null)
1972 >             if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null)
1973                   return t;
1974           }
1975       }
1990                    8);
1991       }
1992
1993 <     // Termination
1993 >     //  Termination
1994
1995       /**
1996 <      * Sets SHUTDOWN bit of runState under lock
1997 <      */
1998 <     private void enableShutdown() {
1999 <         ReentrantLock lock = this.lock;
2000 <         if (runState >= 0) {
2001 <             lock.lock();                         // don't need try/finally
2002 <             runState |= SHUTDOWN;
1765 <             lock.unlock();
1766 <         }
1767 <     }
1768 <
1769 <     /**
1770 <      * Possibly initiates and/or completes termination. Upon
1771 <      * termination, cancels all queued tasks and then
1996 >      * Possibly initiates and/or completes termination. The caller
1997 >      * triggering termination runs three passes through workQueues:
1998 >      * (0) Setting termination status, followed by wakeups of queued
1999 >      * workers; (1) cancelling all tasks; (2) interrupting lagging
2000 >      * threads (likely in external tasks, but possibly also blocked in
2001 >      * joins). Each pass repeats previous steps because of potential
2002 >      * lagging thread creation.
2003        *
2004        * @param now if true, unconditionally terminate, else only
2005        * if no work and no active workers
2006 +      * @param enable if true, enable shutdown when next possible
2007        * @return true if now terminating or terminated
2008        */
2009 <     private boolean tryTerminate(boolean now) {
2009 >     private boolean tryTerminate(boolean now, boolean enable) {
2010 >         Mutex lock = this.lock;
2011           for (long c;;) {
2012               if (((c = ctl) & STOP_BIT) != 0) {   // already terminating
2013                   if ((short)(c >>> TC_SHIFT) == -parallelism) {
1781 -                     ReentrantLock lock = this.lock; // signal when no workers
2014                       lock.lock();                 // don't need try/finally
2015                       termination.signalAll();     // signal when 0 workers
2016                       lock.unlock();
2017                   }
2018                   return true;
2019               }
2020 <             if (!now) {
2021 <                 if ((int)(c >> AC_SHIFT) != -parallelism || runState >= 0 ||
2020 >             if (runState >= 0) {                 // not yet enabled
2021 >                 if (!enable)
2022 >                     return false;
2023 >                 lock.lock();
2024 >                 runState |= SHUTDOWN;
2025 >                 lock.unlock();
2026 >             }
2027 >             if (!now) {                          // check if idle & no tasks
2028 >                 if ((int)(c >> AC_SHIFT) != -parallelism ||
2029                       hasQueuedSubmissions())
2030                       return false;
2031                   // Check for unqueued inactive workers. One pass suffices.
2032                   WorkQueue[] ws = workQueues; WorkQueue w;
2033                   if (ws != null) {
2034 <                     int n = ws.length;
1796 <                     for (int i = 1; i < n; i += 2) {
2034 >                     for (int i = 1; i < ws.length; i += 2) {
2035                           if ((w = ws[i]) != null && w.eventCount >= 0)
2036                               return false;
2037                       }
2038                   }
2039               }
2040 <             if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT))
2041 <                 startTerminating();
2042 <         }
2043 <     }
2044 <
2045 <     /**
2046 <      * Initiates termination: Runs three passes through workQueues:
2047 <      * (0) Setting termination status, followed by wakeups of queued
2048 <      * workers; (1) cancelling all tasks; (2) interrupting lagging
2049 <      * threads (likely in external tasks, but possibly also blocked in
2050 <      * joins). Each pass repeats previous steps because of potential
2051 <      * lagging thread creation.
2052 <      */
1815 <     private void startTerminating() {
1816 <         for (int pass = 0; pass < 3; ++pass) {
1817 <             WorkQueue[] ws = workQueues;
1818 <             if (ws != null) {
1819 <                 WorkQueue w; Thread wt;
1820 <                 int n = ws.length;
1821 <                 for (int j = 0; j < n; ++j) {
1822 <                     if ((w = ws[j]) != null) {
1823 <                         w.runState = -1;
1824 <                         if (pass > 0) {
1825 <                             w.cancelAll();
1826 <                             if (pass > 1 && (wt = w.owner) != null &&
1827 <                                 !wt.isInterrupted()) {
1828 <                                 try {
1829 <                                     wt.interrupt();
1830 <                                 } catch (SecurityException ignore) {
2040 >             if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) {
2041 >                 for (int pass = 0; pass < 3; ++pass) {
2042 >                     WorkQueue[] ws = workQueues;
2043 >                     if (ws != null) {
2044 >                         WorkQueue w;
2045 >                         int n = ws.length;
2046 >                         for (int i = 0; i < n; ++i) {
2047 >                             if ((w = ws[i]) != null) {
2048 >                                 w.runState = -1;
2049 >                                 if (pass > 0) {
2050 >                                     w.cancelAll();
2051 >                                     if (pass > 1)
2052 >                                         w.interruptOwner();
2053                                   }
2054                               }
2055                           }
2056 <                     }
2057 <                 }
2058 <                 // Wake up workers parked on event queue
2059 <                 int i, e; long c; Thread p;
2060 <                 while ((i = ((~(e = (int)(c = ctl)) << 1) | 1) & SMASK) < n &&
2061 <                        (w = ws[i]) != null &&
2062 <                        w.eventCount == (e | INT_SIGN)) {
2063 <                     long nc = ((long)(w.nextWait & E_MASK) |
2064 <                                ((c + AC_UNIT) & AC_MASK) |
2065 <                                (c & (TC_MASK|STOP_BIT)));
2066 <                     if (U.compareAndSwapLong(this, CTL, c, nc)) {
2067 <                         w.eventCount = (e + E_SEQ) & E_MASK;
2068 <                         if ((p = w.parker) != null)
2069 <                             U.unpark(p);
2056 >                         // Wake up workers parked on event queue
2057 >                         int i, e; long cc; Thread p;
2058 >                         while ((e = (int)(cc = ctl) & E_MASK) != 0 &&
2059 >                                (i = e & SMASK) < n &&
2060 >                                (w = ws[i]) != null) {
2061 >                             long nc = ((long)(w.nextWait & E_MASK) |
2062 >                                        ((cc + AC_UNIT) & AC_MASK) |
2063 >                                        (cc & (TC_MASK|STOP_BIT)));
2064 >                             if (w.eventCount == (e | INT_SIGN) &&
2065 >                                 U.compareAndSwapLong(this, CTL, cc, nc)) {
2066 >                                 w.eventCount = (e + E_SEQ) & E_MASK;
2067 >                                 w.runState = -1;
2068 >                                 if ((p = w.parker) != null)
2069 >                                     U.unpark(p);
2070 >                             }
2071 >                         }
2072                       }
2073                   }
2074               }
2144 |
|
checkPermission(); |
2145 |
|
if (factory == null) |
2146 |
|
throw new NullPointerException(); |
2147 |
< |
if (parallelism <= 0 || parallelism > MAX_ID) |
2147 |
> |
if (parallelism <= 0 || parallelism > MAX_CAP) |
2148 |
|
throw new IllegalArgumentException(); |
2149 |
|
this.parallelism = parallelism; |
2150 |
|
this.factory = factory; |
2151 |
|
this.ueh = handler; |
2152 |
|
this.localMode = asyncMode ? FIFO_QUEUE : LIFO_QUEUE; |
1929 |
– |
this.nextPoolIndex = 1; |
2153 |
|
long np = (long)(-parallelism); // offset ctl counts |
2154 |
|
this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK); |
2155 |
< |
// initialize workQueues array with room for 2*parallelism if possible |
2156 |
< |
int n = parallelism << 1; |
2157 |
< |
if (n >= MAX_ID) |
2158 |
< |
n = MAX_ID; |
2159 |
< |
else { // See Hackers Delight, sec 3.2, where n < (1 << 16) |
2160 |
< |
n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; |
2161 |
< |
} |
1939 |
< |
this.workQueues = new WorkQueue[(n + 1) << 1]; |
1940 |
< |
ReentrantLock lck = this.lock = new ReentrantLock(); |
1941 |
< |
this.termination = lck.newCondition(); |
2155 |
> |
// Use nearest power of 2 for workQueues size. See Hacker's Delight, sec 3.2. |
2156 |
> |
int n = parallelism - 1; |
2157 |
> |
n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16; |
2158 |
> |
int size = (n + 1) << 1; // #slots = 2*#workers |
2159 |
> |
this.submitMask = size - 1; // room for max # of submit queues |
2160 |
> |
this.workQueues = new WorkQueue[size]; |
2161 |
> |
this.termination = (this.lock = new Mutex()).newCondition(); |
2162 |
|
this.stealCount = new AtomicLong(); |
2163 |
|
this.nextWorkerNumber = new AtomicInteger(); |
2164 |
+ |
int pn = poolNumberGenerator.incrementAndGet(); |
2165 |
|
StringBuilder sb = new StringBuilder("ForkJoinPool-"); |
2166 |
< |
sb.append(poolNumberGenerator.incrementAndGet()); |
2166 |
> |
sb.append(Integer.toString(pn)); |
2167 |
|
sb.append("-worker-"); |
2168 |
|
this.workerNamePrefix = sb.toString(); |
2169 |
< |
// Create initial submission queue |
2170 |
< |
WorkQueue sq = tryAddSharedQueue(0); |
2171 |
< |
if (sq != null) |
1951 |
< |
sq.growArray(false); |
2169 |
> |
lock.lock(); |
2170 |
> |
this.runState = 1; // set init flag |
2171 |
> |
lock.unlock(); |
2172 |
|
} |
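The constructor's shift-and-or cascade rounds parallelism up to a power of two before doubling it for the queue array. A self-contained demo of the trick (the helper name is ours, not part of this class):

    // Round n up to the next power of two, 1 <= n <= 2^30 assumed:
    // or-ing in shifted copies smears the highest set bit into every
    // lower position, so n becomes 2^k - 1, and adding one yields 2^k.
    static int nextPowerOfTwo(int n) {
        n -= 1;
        n |= n >>> 1; n |= n >>> 2; n |= n >>> 4;
        n |= n >>> 8; n |= n >>> 16;
        return n + 1;
    }
    // e.g. nextPowerOfTwo(7) == 8, so a pool with parallelism 7 gets
    // (8 << 1) == 16 workQueues slots, matching "size = (n + 1) << 1".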
2173 |
|
|
2174 |
|
// Execution methods |
2190 |
|
* scheduled for execution |
2191 |
|
*/ |
2192 |
|
public <T> T invoke(ForkJoinTask<T> task) { |
2193 |
+ |
if (task == null) |
2194 |
+ |
throw new NullPointerException(); |
2195 |
|
doSubmit(task); |
2196 |
|
return task.join(); |
2197 |
|
} |
2205 |
|
* scheduled for execution |
2206 |
|
*/ |
2207 |
|
public void execute(ForkJoinTask<?> task) { |
2208 |
+ |
if (task == null) |
2209 |
+ |
throw new NullPointerException(); |
2210 |
|
doSubmit(task); |
2211 |
|
} |
2212 |
|
|
2224 |
|
if (task instanceof ForkJoinTask<?>) // avoid re-wrap |
2225 |
|
job = (ForkJoinTask<?>) task; |
2226 |
|
else |
2227 |
< |
job = ForkJoinTask.adapt(task, null); |
2227 |
> |
job = new ForkJoinTask.AdaptedRunnableAction(task); |
2228 |
|
doSubmit(job); |
2229 |
|
} |
2230 |
|
|
2238 |
|
* scheduled for execution |
2239 |
|
*/ |
2240 |
|
public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) { |
2241 |
+ |
if (task == null) |
2242 |
+ |
throw new NullPointerException(); |
2243 |
|
doSubmit(task); |
2244 |
|
return task; |
2245 |
|
} |
2250 |
|
* scheduled for execution |
2251 |
|
*/ |
2252 |
|
public <T> ForkJoinTask<T> submit(Callable<T> task) { |
2253 |
< |
if (task == null) |
2028 |
< |
throw new NullPointerException(); |
2029 |
< |
ForkJoinTask<T> job = ForkJoinTask.adapt(task); |
2253 |
> |
ForkJoinTask<T> job = new ForkJoinTask.AdaptedCallable<T>(task); |
2254 |
|
doSubmit(job); |
2255 |
|
return job; |
2256 |
|
} |
2261 |
|
* scheduled for execution |
2262 |
|
*/ |
2263 |
|
public <T> ForkJoinTask<T> submit(Runnable task, T result) { |
2264 |
< |
if (task == null) |
2041 |
< |
throw new NullPointerException(); |
2042 |
< |
ForkJoinTask<T> job = ForkJoinTask.adapt(task, result); |
2264 |
> |
ForkJoinTask<T> job = new ForkJoinTask.AdaptedRunnable<T>(task, result); |
2265 |
|
doSubmit(job); |
2266 |
|
return job; |
2267 |
|
} |
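The submit overloads above differ only in how a result is conveyed back to the caller. An illustrative client-side usage, assuming a pool named pool (anonymous classes, since this package targets pre-lambda Java):

    // Callable form: the value computed by call() is the task's result.
    ForkJoinTask<Integer> t1 = pool.submit(new Callable<Integer>() {
        public Integer call() { return 6 * 7; }
    });
    // Runnable-plus-result form: the given value is returned on join.
    ForkJoinTask<String> t2 = pool.submit(new Runnable() {
        public void run() { /* side effects only */ }
    }, "done");
    int v = t1.join();     // 42
    String s = t2.join();  // "done"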
2278 |
|
if (task instanceof ForkJoinTask<?>) // avoid re-wrap |
2279 |
|
job = (ForkJoinTask<?>) task; |
2280 |
|
else |
2281 |
< |
job = ForkJoinTask.adapt(task, null); |
2281 |
> |
job = new ForkJoinTask.AdaptedRunnableAction(task); |
2282 |
|
doSubmit(job); |
2283 |
|
return job; |
2284 |
|
} |
2288 |
|
* @throws RejectedExecutionException {@inheritDoc} |
2289 |
|
*/ |
2290 |
|
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) { |
2291 |
< |
ArrayList<ForkJoinTask<T>> forkJoinTasks = |
2292 |
< |
new ArrayList<ForkJoinTask<T>>(tasks.size()); |
2293 |
< |
for (Callable<T> task : tasks) |
2294 |
< |
forkJoinTasks.add(ForkJoinTask.adapt(task)); |
2295 |
< |
invoke(new InvokeAll<T>(forkJoinTasks)); |
2296 |
< |
|
2291 |
> |
// In previous versions of this class, this method constructed |
2292 |
> |
// a task to run ForkJoinTask.invokeAll, but now external |
2293 |
> |
// invocation of multiple tasks is at least as efficient. |
2294 |
> |
List<ForkJoinTask<T>> fs = new ArrayList<ForkJoinTask<T>>(tasks.size()); |
2295 |
> |
// Workaround needed because method wasn't declared with |
2296 |
> |
// wildcards in return type but should have been. |
2297 |
|
@SuppressWarnings({"unchecked", "rawtypes"}) |
2298 |
< |
List<Future<T>> futures = (List<Future<T>>) (List) forkJoinTasks; |
2077 |
< |
return futures; |
2078 |
< |
} |
2298 |
> |
List<Future<T>> futures = (List<Future<T>>) (List) fs; |
2299 |
|
|
2300 |
< |
static final class InvokeAll<T> extends RecursiveAction { |
2301 |
< |
final ArrayList<ForkJoinTask<T>> tasks; |
2302 |
< |
InvokeAll(ArrayList<ForkJoinTask<T>> tasks) { this.tasks = tasks; } |
2303 |
< |
public void compute() { |
2304 |
< |
try { invokeAll(tasks); } |
2305 |
< |
catch (Exception ignore) {} |
2300 |
> |
boolean done = false; |
2301 |
> |
try { |
2302 |
> |
for (Callable<T> t : tasks) { |
2303 |
> |
ForkJoinTask<T> f = new ForkJoinTask.AdaptedCallable<T>(t); |
2304 |
> |
doSubmit(f); |
2305 |
> |
fs.add(f); |
2306 |
> |
} |
2307 |
> |
for (ForkJoinTask<T> f : fs) |
2308 |
> |
f.quietlyJoin(); |
2309 |
> |
done = true; |
2310 |
> |
return futures; |
2311 |
> |
} finally { |
2312 |
> |
if (!done) |
2313 |
> |
for (ForkJoinTask<T> f : fs) |
2314 |
> |
f.cancel(false); |
2315 |
|
} |
2087 |
– |
private static final long serialVersionUID = -7914297376763021607L; |
2316 |
|
} |
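The rewritten invokeAll submits every adapted task, quietly joins them all, and cancels whatever was added if anything fails before done is set. From the caller's side the contract is the usual ExecutorService one; a sketch (requires java.util.concurrent.ExecutionException, names illustrative):

    static List<Integer> squares(ForkJoinPool pool)
            throws InterruptedException, ExecutionException {
        List<Callable<Integer>> tasks = new ArrayList<Callable<Integer>>();
        for (int i = 0; i < 4; ++i) {
            final int k = i;
            tasks.add(new Callable<Integer>() {
                public Integer call() { return k * k; }
            });
        }
        List<Integer> out = new ArrayList<Integer>();
        for (Future<Integer> f : pool.invokeAll(tasks))
            out.add(f.get());        // 0, 1, 4, 9; order is preserved
        return out;
    }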
2317 |
|
|
2318 |
|
/** |
2377 |
|
int rc = 0; |
2378 |
|
WorkQueue[] ws; WorkQueue w; |
2379 |
|
if ((ws = workQueues) != null) { |
2380 |
< |
int n = ws.length; |
2381 |
< |
for (int i = 1; i < n; i += 2) { |
2154 |
< |
Thread.State s; ForkJoinWorkerThread wt; |
2155 |
< |
if ((w = ws[i]) != null && (wt = w.owner) != null && |
2156 |
< |
w.eventCount >= 0 && |
2157 |
< |
(s = wt.getState()) != Thread.State.BLOCKED && |
2158 |
< |
s != Thread.State.WAITING && |
2159 |
< |
s != Thread.State.TIMED_WAITING) |
2380 |
> |
for (int i = 1; i < ws.length; i += 2) { |
2381 |
> |
if ((w = ws[i]) != null && w.isApparentlyUnblocked()) |
2382 |
|
++rc; |
2383 |
|
} |
2384 |
|
} |
2427 |
|
long count = stealCount.get(); |
2428 |
|
WorkQueue[] ws; WorkQueue w; |
2429 |
|
if ((ws = workQueues) != null) { |
2430 |
< |
int n = ws.length; |
2209 |
< |
for (int i = 1; i < n; i += 2) { |
2430 |
> |
for (int i = 1; i < ws.length; i += 2) { |
2431 |
|
if ((w = ws[i]) != null) |
2432 |
|
count += w.totalSteals; |
2433 |
|
} |
2449 |
|
long count = 0; |
2450 |
|
WorkQueue[] ws; WorkQueue w; |
2451 |
|
if ((ws = workQueues) != null) { |
2452 |
< |
int n = ws.length; |
2232 |
< |
for (int i = 1; i < n; i += 2) { |
2452 |
> |
for (int i = 1; i < ws.length; i += 2) { |
2453 |
|
if ((w = ws[i]) != null) |
2454 |
|
count += w.queueSize(); |
2455 |
|
} |
2468 |
|
int count = 0; |
2469 |
|
WorkQueue[] ws; WorkQueue w; |
2470 |
|
if ((ws = workQueues) != null) { |
2471 |
< |
int n = ws.length; |
2252 |
< |
for (int i = 0; i < n; i += 2) { |
2471 |
> |
for (int i = 0; i < ws.length; i += 2) { |
2472 |
|
if ((w = ws[i]) != null) |
2473 |
|
count += w.queueSize(); |
2474 |
|
} |
2485 |
|
public boolean hasQueuedSubmissions() { |
2486 |
|
WorkQueue[] ws; WorkQueue w; |
2487 |
|
if ((ws = workQueues) != null) { |
2488 |
< |
int n = ws.length; |
2489 |
< |
for (int i = 0; i < n; i += 2) { |
2271 |
< |
if ((w = ws[i]) != null && w.queueSize() != 0) |
2488 |
> |
for (int i = 0; i < ws.length; i += 2) { |
2489 |
> |
if ((w = ws[i]) != null && !w.isEmpty()) |
2490 |
|
return true; |
2491 |
|
} |
2492 |
|
} |
2503 |
|
protected ForkJoinTask<?> pollSubmission() { |
2504 |
|
WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t; |
2505 |
|
if ((ws = workQueues) != null) { |
2506 |
< |
int n = ws.length; |
2289 |
< |
for (int i = 0; i < n; i += 2) { |
2506 |
> |
for (int i = 0; i < ws.length; i += 2) { |
2507 |
|
if ((w = ws[i]) != null && (t = w.poll()) != null) |
2508 |
|
return t; |
2509 |
|
} |
2532 |
|
int count = 0; |
2533 |
|
WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t; |
2534 |
|
if ((ws = workQueues) != null) { |
2535 |
< |
int n = ws.length; |
2319 |
< |
for (int i = 0; i < n; ++i) { |
2535 |
> |
for (int i = 0; i < ws.length; ++i) { |
2536 |
|
if ((w = ws[i]) != null) { |
2537 |
|
while ((t = w.poll()) != null) { |
2538 |
|
c.add(t); |
2552 |
|
* @return a string identifying this pool, as well as its state |
2553 |
|
*/ |
2554 |
|
public String toString() { |
2555 |
< |
long st = getStealCount(); |
2556 |
< |
long qt = getQueuedTaskCount(); |
2557 |
< |
long qs = getQueuedSubmissionCount(); |
2342 |
< |
int rc = getRunningThreadCount(); |
2343 |
< |
int pc = parallelism; |
2555 |
> |
// Use a single pass through workQueues to collect counts |
2556 |
> |
long qt = 0L, qs = 0L; int rc = 0; |
2557 |
> |
long st = stealCount.get(); |
2558 |
|
long c = ctl; |
2559 |
+ |
WorkQueue[] ws; WorkQueue w; |
2560 |
+ |
if ((ws = workQueues) != null) { |
2561 |
+ |
for (int i = 0; i < ws.length; ++i) { |
2562 |
+ |
if ((w = ws[i]) != null) { |
2563 |
+ |
int size = w.queueSize(); |
2564 |
+ |
if ((i & 1) == 0) |
2565 |
+ |
qs += size; |
2566 |
+ |
else { |
2567 |
+ |
qt += size; |
2568 |
+ |
st += w.totalSteals; |
2569 |
+ |
if (w.isApparentlyUnblocked()) |
2570 |
+ |
++rc; |
2571 |
+ |
} |
2572 |
+ |
} |
2573 |
+ |
} |
2574 |
+ |
} |
2575 |
+ |
int pc = parallelism; |
2576 |
|
int tc = pc + (short)(c >>> TC_SHIFT); |
2577 |
|
int ac = pc + (int)(c >> AC_SHIFT); |
2578 |
|
if (ac < 0) // ignore transient negative |
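The tc/ac lines decode two signed 16-bit fields packed into the high half of the 64-bit ctl word; both are stored as deltas from parallelism, so sign extension recovers negative values. A toy round-trip, assuming the AC_SHIFT == 48 / TC_SHIFT == 32 layout this file uses for ctl:

    static final int AC_SHIFT = 48, TC_SHIFT = 32;   // assumed layout

    static long pack(int parallelism, int ac, int tc) {
        long dac = ac - parallelism, dtc = tc - parallelism;
        return ((dac << AC_SHIFT) & 0xffff000000000000L) |
               ((dtc << TC_SHIFT) & 0x0000ffff00000000L);
    }
    static int unpackAc(long c, int parallelism) {
        return parallelism + (int)(c >> AC_SHIFT);    // arithmetic shift
    }
    static int unpackTc(long c, int parallelism) {
        return parallelism + (short)(c >>> TC_SHIFT); // cast restores sign
    }
    // pack(8, 5, 8) round-trips: unpackAc(..) == 5, unpackTc(..) == 8.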
2608 |
|
*/ |
2609 |
|
public void shutdown() { |
2610 |
|
checkPermission(); |
2611 |
< |
enableShutdown(); |
2381 |
< |
tryTerminate(false); |
2611 |
> |
tryTerminate(false, true); |
2612 |
|
} |
2613 |
|
|
2614 |
|
/** |
2629 |
|
*/ |
2630 |
|
public List<Runnable> shutdownNow() { |
2631 |
|
checkPermission(); |
2632 |
< |
enableShutdown(); |
2403 |
< |
tryTerminate(true); |
2632 |
> |
tryTerminate(true, true); |
2633 |
|
return Collections.emptyList(); |
2634 |
|
} |
2635 |
|
|
2686 |
|
public boolean awaitTermination(long timeout, TimeUnit unit) |
2687 |
|
throws InterruptedException { |
2688 |
|
long nanos = unit.toNanos(timeout); |
2689 |
< |
final ReentrantLock lock = this.lock; |
2689 |
> |
final Mutex lock = this.lock; |
2690 |
|
lock.lock(); |
2691 |
|
try { |
2692 |
|
for (;;) { |
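awaitTermination follows the standard ExecutorService protocol. The usual client idiom, shown here only as an illustration with a 60-second budget:

    pool.shutdown();                    // stop accepting new tasks
    try {
        if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
            pool.shutdownNow();         // cancel lingering tasks
            pool.awaitTermination(60, TimeUnit.SECONDS);
        }
    } catch (InterruptedException ie) {
        pool.shutdownNow();
        Thread.currentThread().interrupt();
    }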
2800 |
|
ForkJoinPool p = ((t instanceof ForkJoinWorkerThread) ? |
2801 |
|
((ForkJoinWorkerThread)t).pool : null); |
2802 |
|
while (!blocker.isReleasable()) { |
2803 |
< |
if (p == null || p.tryCompensate()) { |
2803 |
> |
if (p == null || p.tryCompensate(null, blocker)) { |
2804 |
|
try { |
2805 |
|
do {} while (!blocker.isReleasable() && !blocker.block()); |
2806 |
|
} finally { |
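managedBlock only compensates with a spare worker (tryCompensate) when the caller is genuinely about to park; isReleasable lets the blocker skip blocking entirely when the resource is already available. A minimal ManagedBlocker patterned on the QueueTaker example in this interface's javadoc (the holder array stands in for a proper field):

    static <E> E takeManaged(final java.util.concurrent.BlockingQueue<E> q)
            throws InterruptedException {
        final Object[] holder = new Object[1];
        ForkJoinPool.managedBlock(new ForkJoinPool.ManagedBlocker() {
            public boolean block() throws InterruptedException {
                if (holder[0] == null)
                    holder[0] = q.take();   // really blocks
                return true;
            }
            public boolean isReleasable() { // avoid blocking if possible
                return holder[0] != null || (holder[0] = q.poll()) != null;
            }
        });
        @SuppressWarnings("unchecked") E e = (E) holder[0];
        return e;
    }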
2817 |
|
// implement RunnableFuture. |
2818 |
|
|
2819 |
|
protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) { |
2820 |
< |
return (RunnableFuture<T>) ForkJoinTask.adapt(runnable, value); |
2820 |
> |
return new ForkJoinTask.AdaptedRunnable<T>(runnable, value); |
2821 |
|
} |
2822 |
|
|
2823 |
|
protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) { |
2824 |
< |
return (RunnableFuture<T>) ForkJoinTask.adapt(callable); |
2824 |
> |
return new ForkJoinTask.AdaptedCallable<T>(callable); |
2825 |
|
} |
2826 |
|
|
2827 |
|
// Unsafe mechanics |
2828 |
|
private static final sun.misc.Unsafe U; |
2829 |
|
private static final long CTL; |
2601 |
– |
private static final long RUNSTATE; |
2830 |
|
private static final long PARKBLOCKER; |
2831 |
+ |
private static final int ABASE; |
2832 |
+ |
private static final int ASHIFT; |
2833 |
|
|
2834 |
|
static { |
2835 |
|
poolNumberGenerator = new AtomicInteger(); |
2836 |
+ |
nextSubmitterSeed = new AtomicInteger(0x55555555); |
2837 |
|
modifyThreadPermission = new RuntimePermission("modifyThread"); |
2838 |
|
defaultForkJoinWorkerThreadFactory = |
2839 |
|
new DefaultForkJoinWorkerThreadFactory(); |
2840 |
+ |
submitters = new ThreadSubmitter(); |
2841 |
|
int s; |
2842 |
|
try { |
2843 |
|
U = getUnsafe(); |
2844 |
|
Class<?> k = ForkJoinPool.class; |
2845 |
< |
Class<?> tk = Thread.class; |
2845 |
> |
Class<?> ak = ForkJoinTask[].class; |
2846 |
|
CTL = U.objectFieldOffset |
2847 |
|
(k.getDeclaredField("ctl")); |
2848 |
< |
RUNSTATE = U.objectFieldOffset |
2617 |
< |
(k.getDeclaredField("runState")); |
2848 |
> |
Class<?> tk = Thread.class; |
2849 |
|
PARKBLOCKER = U.objectFieldOffset |
2850 |
|
(tk.getDeclaredField("parkBlocker")); |
2851 |
+ |
ABASE = U.arrayBaseOffset(ak); |
2852 |
+ |
s = U.arrayIndexScale(ak); |
2853 |
|
} catch (Exception e) { |
2854 |
|
throw new Error(e); |
2855 |
|
} |
2856 |
+ |
if ((s & (s-1)) != 0) |
2857 |
+ |
throw new Error("data type scale not a power of two"); |
2858 |
+ |
ASHIFT = 31 - Integer.numberOfLeadingZeros(s); |
2859 |
|
} |
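ABASE and ASHIFT convert a queue-array index into the raw byte offset Unsafe needs: offset = ((long)i << ASHIFT) + ABASE. The power-of-two check above is what makes the shift exact. A plain-arithmetic illustration (no Unsafe call), assuming a typical 4-byte reference scale:

    // arrayIndexScale == 4 gives ASHIFT == 2, so slot i of a
    // ForkJoinTask[] starts (i << 2) bytes past the array header.
    static long elementOffset(int abase, int ashift, int i) {
        return ((long) i << ashift) + abase;
    }
    // e.g. elementOffset(16, 2, 5) == 36.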
2860 |
|
|
2861 |
|
/** |