6 |
7 | package jsr166y;
8 |
9 - import java.util.concurrent.*;
10 -
9 | import java.util.Random;
10 | import java.util.Collection;
11 | import java.util.concurrent.locks.LockSupport;
12 + import java.util.concurrent.RejectedExecutionException;
13 |
14 | /**
15 | * A thread managed by a {@link ForkJoinPool}. This class is
84 | *
85 | * When a worker would otherwise be blocked waiting to join a
86 | * task, it first tries a form of linear helping: Each worker
87 < * records (in field stolen) the most recent task it stole
88 < * from some other worker. Plus, it records (in field joining) the
89 < * task it is currently actively joining. Method joinTask uses
87 > * records (in field currentSteal) the most recent task it stole
88 > * from some other worker. Plus, it records (in field currentJoin)
89 > * the task it is currently actively joining. Method joinTask uses
90 | * these markers to try to find a worker to help (i.e., steal back
91 | * a task from and execute it) that could hasten completion of the
92 | * actively joined task. In essence, the joiner executes a task
96 | * technique for implementing efficient futures" SIGPLAN Notices,
97 | * 1993 (http://portal.acm.org/citation.cfm?id=155354). It differs
98 | * in that: (1) We only maintain dependency links across workers
99 < * upon steals, rather than maintain per-task bookkeeping. This
100 < * requires a linear scan of workers array to locate stealers,
101 < * which isolates cost to when it is needed, rather than adding to
102 < * per-task overhead. (2) It is "shallow", ignoring nesting and
103 < * potentially cyclic mutual steals. (3) It is intentionally
104 < * racy: field joining is updated only while actively joining,
105 < * which means that we could miss links in the chain during
106 < * long-lived tasks, GC stalls etc. (4) We fall back to
107 < * suspending the worker and if necessary replacing it with a
108 < * spare (see ForkJoinPool.tryAwaitJoin).
99 > * upon steals, rather than use per-task bookkeeping. This may
100 > * require a linear scan of workers array to locate stealers, but
101 > * usually doesn't because stealers leave hints (that may become
102 > * stale/wrong) of where to locate them. This isolates cost to
103 > * when it is needed, rather than adding to per-task overhead.
104 > * (2) It is "shallow", ignoring nesting and potentially cyclic
105 > * mutual steals. (3) It is intentionally racy: field currentJoin
106 > * is updated only while actively joining, which means that we
107 > * miss links in the chain during long-lived tasks, GC stalls etc
108 > * (which is OK since blocking in such cases is usually a good
109 > * idea). (4) We bound the number of attempts to find work (see
110 > * MAX_HELP_DEPTH) and fall back to suspending the worker and if
111 > * necessary replacing it with a spare (see
112 > * ForkJoinPool.awaitJoin).
113 | *
114 < * Efficient implementation of these algorithms currently relies on
115 < * an uncomfortable amount of "Unsafe" mechanics. To maintain
114 > * Efficient implementation of these algorithms currently relies
115 > * on an uncomfortable amount of "Unsafe" mechanics. To maintain
116 | * correct orderings, reads and writes of variable base require
117 | * volatile ordering. Variable sp does not require volatile
118 | * writes but still needs store-ordering, which we accomplish by
119 | * pre-incrementing sp before filling the slot with an ordered
120 | * store. (Pre-incrementing also enables backouts used in
121 < * scanWhileJoining.) Because they are protected by volatile base
122 < * reads, reads of the queue array and its slots by other threads
123 < * do not need volatile load semantics, but writes (in push)
124 < * require store order and CASes (in pop and deq) require
125 < * (volatile) CAS semantics. (Michael, Saraswat, and Vechev's
126 < * algorithm has similar properties, but without support for
127 < * nulling slots.) Since these combinations aren't supported
128 < * using ordinary volatiles, the only way to accomplish these
129 < * efficiently is to use direct Unsafe calls. (Using external
130 < * AtomicIntegers and AtomicReferenceArrays for the indices and
131 < * array is significantly slower because of memory locality and
132 < * indirection effects.)
121 > * joinTask.) Because they are protected by volatile base reads,
122 > * reads of the queue array and its slots by other threads do not
123 > * need volatile load semantics, but writes (in push) require
124 > * store order and CASes (in pop and deq) require (volatile) CAS
125 > * semantics. (Michael, Saraswat, and Vechev's algorithm has
126 > * similar properties, but without support for nulling slots.)
127 > * Since these combinations aren't supported using ordinary
128 > * volatiles, the only way to accomplish these efficiently is to
129 > * use direct Unsafe calls. (Using external AtomicIntegers and
130 > * AtomicReferenceArrays for the indices and array is
131 > * significantly slower because of memory locality and indirection
132 > * effects.)
133 | *
134 | * Further, performance on most platforms is very sensitive to
135 | * placement and sizing of the (resizable) queue array. Even
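A minimal sketch of the slot discipline described in the comment above, written with the slower external AtomicInteger / AtomicReferenceArray route that the comment itself mentions. This is illustration only, not code from the patch: the class and field names are invented, and resizing, signalling, and the Unsafe-based fast path are omitted.

    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.concurrent.atomic.AtomicReferenceArray;

    class WorkQueueSketch<T> {
        final AtomicReferenceArray<T> slots = new AtomicReferenceArray<T>(1 << 13);
        final AtomicInteger base = new AtomicInteger(); // volatile-read by stealers
        int sp;                                         // written only by the owner

        void push(T t) {               // owner: pre-increment sp, then ordered store
            int s = sp++;
            slots.lazySet(s & (slots.length() - 1), t);
        }

        T pop() {                      // owner: CAS the slot so a racing deq cannot also take it
            int s = sp;
            if (s != base.get()) {
                int i = (s - 1) & (slots.length() - 1);
                T t = slots.get(i);
                if (t != null && slots.compareAndSet(i, t, null)) {
                    sp = s - 1;
                    return t;
                }
            }
            return null;
        }

        T deq() {                      // stealer: volatile read of base guards the slot read
            int b = base.get();
            int i = b & (slots.length() - 1);
            T t = slots.get(i);
            if (t != null && b == base.get() && slots.compareAndSet(i, t, null)) {
                base.set(b + 1);
                return t;
            }
            return null;
        }
    }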
154 | private static final Random seedGenerator = new Random();
155 |
156 | /**
157 < * The timeout value for suspending spares. Spare workers that
158 < * remain unsignalled for more than this time may be trimmed
159 < * (killed and removed from pool). Since our goal is to avoid
160 < * long-term thread buildup, the exact value of timeout does not
161 < * matter too much so long as it avoids most false-alarm timeouts
159 < * under GC stalls or momentarily high system load.
157 > * The maximum stolen->joining link depth allowed in helpJoinTask.
158 > * Depths for legitimate chains are unbounded, but we use a fixed
159 > * constant to avoid (otherwise unchecked) cycles and bound
160 > * staleness of traversal parameters at the expense of sometimes
161 > * blocking when we could be helping.
162 | */
163 < private static final long SPARE_KEEPALIVE_NANOS =
162 < 5L * 1000L * 1000L * 1000L; // 5 secs
163 > private static final int MAX_HELP_DEPTH = 8;
164 |
165 | /**
166 | * Capacity of work-stealing queue array upon initialization.
171 |
172 | /**
173 | * Maximum work-stealing queue array size. Must be less than or
174 < * equal to 1 << 28 to ensure lack of index wraparound. (This
175 < * is less than usual bounds, because we need leftshift by 3
176 < * to be in int range).
174 > * equal to 1 << (31 - width of array entry) to ensure lack of
175 > * index wraparound. The value is set in the static block
176 > * at the end of this file after obtaining width.
177 | */
178 < private static final int MAXIMUM_QUEUE_CAPACITY = 1 << 28;
178 > private static final int MAXIMUM_QUEUE_CAPACITY;
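For concreteness, the width-based bound referred to above is computed in the static initializer shown at the end of this diff (line 1170). Assuming the usual element scales that Unsafe.arrayIndexScale reports for reference arrays, the arithmetic works out as follows (values shown purely as illustration):

    int scale = 8;                                          // 8-byte array entries
    int qShift = 31 - Integer.numberOfLeadingZeros(scale);  // = 3
    int maxCap = 1 << (31 - qShift);                        // = 1 << 28, the old hard-wired bound
    // With 4-byte entries (compressed references): qShift = 2, giving a bound of 1 << 29.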
179 |
180 | /**
181 | * The pool this thread works in. Accessed directly by ForkJoinTask.
183 | final ForkJoinPool pool;
184 |
185 | /**
185 - * The task most recently stolen from another worker
186 - */
187 - private volatile ForkJoinTask<?> stolen;
188 -
189 - /**
190 - * The task currently being joined, set only when actively
191 - * trying to helpStealer.
192 - */
193 - private volatile ForkJoinTask<?> joining;
194 -
195 - /**
186 | * The work-stealing queue array. Size must be a power of two.
187 | * Initialized in onStart, to improve memory locality.
188 | */
204 | private int sp;
205 |
206 | /**
207 + * The index of most recent stealer, used as a hint to avoid
208 + * traversal in method helpJoinTask. This is only a hint because a
209 + * worker might have had multiple steals and this only holds one
210 + * of them (usually the most current). Declared non-volatile,
211 + * relying on other prevailing sync to keep reasonably current.
212 + */
213 + private int stealHint;
214 +
215 + /**
216 | * Run state of this worker. In addition to the usual run levels,
217 | * tracks if this worker is suspended as a spare, and if it was
218 | * killed (trimmed) while suspended. However, "active" status is
219 < * maintained separately.
219 > * maintained separately and modified only in conjunction with
220 > * CASes of the pool's runState (which are currently sadly
221 > * manually inlined for performance.) Accessed directly by pool
222 > * to simplify checks for normal (zero) status.
223 | */
224 < private volatile int runState;
224 > volatile int runState;
225 |
226 | private static final int TERMINATING = 0x01;
227 | private static final int TERMINATED = 0x02;
229 | private static final int TRIMMED = 0x08; // killed while suspended
230 |
231 | /**
232 < * Number of LockSupport.park calls to block this thread for
233 < * suspension or event waits. Used for internal instrumention;
232 < * currently not exported but included because volatile write upon
233 < * park also provides a workaround for a JVM bug.
234 < */
235 < volatile int parkCount;
236 <
237 < /**
238 < * Number of steals, transferred and reset in pool callbacks pool
239 < * when idle Accessed directly by pool.
232 > * Number of steals. Directly accessed (and reset) by
233 > * pool.tryAccumulateStealCount when idle.
234 | */
235 | int stealCount;
236 |
248 |
249 | /**
250 | * True if use local fifo, not default lifo, for local polling.
251 < * Shadows value from ForkJoinPool, which resets it if changed
258 < * pool-wide.
251 > * Shadows value from ForkJoinPool.
252 | */
253 | private final boolean locallyFifo;
254 <
254 >
255 | /**
256 | * Index of this worker in pool array. Set once by pool before
257 | * running, and accessed directly by pool to locate this worker in
266 | int lastEventCount;
267 |
268 | /**
269 < * Encoded index and event count of next event waiter. Used only
270 < * by ForkJoinPool for managing event waiters.
269 > * Encoded index and event count of next event waiter. Accessed
270 > * only by ForkJoinPool for managing event waiters.
271 | */
272 | volatile long nextWaiter;
273 |
274 | /**
275 + * Number of times this thread suspended as spare. Accessed only
276 + * by pool.
277 + */
278 + int spareCount;
279 +
280 + /**
281 + * Encoded index and count of next spare waiter. Accessed only
282 + * by ForkJoinPool for managing spares.
283 + */
284 + volatile int nextSpare;
285 +
286 + /**
287 + * The task currently being joined, set only when actively trying
288 + * to help other stealers in helpJoinTask. Written only by this
289 + * thread, but read by others.
290 + */
291 + private volatile ForkJoinTask<?> currentJoin;
292 +
293 + /**
294 + * The task most recently stolen from another worker (or
295 + * submission queue). Written only by this thread, but read by
296 + * others.
297 + */
298 + private volatile ForkJoinTask<?> currentSteal;
299 +
300 + /**
301 | * Creates a ForkJoinWorkerThread operating in the given pool.
302 | *
303 | * @param pool the pool this thread works in
306 | protected ForkJoinWorkerThread(ForkJoinPool pool) {
307 | this.pool = pool;
308 | this.locallyFifo = pool.locallyFifo;
309 + setDaemon(true);
310 | // To avoid exposing construction details to subclasses,
311 | // remaining initialization is in start() and onStart()
312 | }
313 |
314 | /**
315 < * Performs additional initialization and starts this thread
315 > * Performs additional initialization and starts this thread.
316 | */
317 | final void start(int poolIndex, UncaughtExceptionHandler ueh) {
318 | this.poolIndex = poolIndex;
319 | if (ueh != null)
320 | setUncaughtExceptionHandler(ueh);
301 - setDaemon(true);
321 | start();
322 | }
323 |
348 | /**
349 | * Initializes internal state after construction but before
350 | * processing any tasks. If you override this method, you must
351 < * invoke super.onStart() at the beginning of the method.
351 > * invoke @code{super.onStart()} at the beginning of the method.
352 | * Initialization requires care: Most fields must have legal
353 | * default values, to ensure that attempted accesses from other
354 | * threads work correctly even before this thread starts
376 | */
377 | protected void onTermination(Throwable exception) {
378 | try {
379 < stolen = null;
380 < joining = null;
379 > ForkJoinPool p = pool;
380 > if (active) {
381 > int a; // inline p.tryDecrementActiveCount
382 > active = false;
383 > do {} while (!UNSAFE.compareAndSwapInt
384 > (p, poolRunStateOffset, a = p.runState, a - 1));
385 > }
386 | cancelTasks();
387 | setTerminated();
388 < pool.workerTerminated(this);
388 > p.workerTerminated(this);
389 | } catch (Throwable ex) { // Shouldn't ever happen
390 | if (exception == null) // but if so, at least rethrown
391 | exception = ex;
415 | // helpers for run()
416 |
417 | /**
418 < * Find and execute tasks and check status while running
418 > * Finds and executes tasks, and checks status while running.
419 | */
420 | private void mainLoop() {
421 < boolean ran = false; // true if ran task in last loop iter
398 < boolean prevRan = false; // true if ran on last or previous step
421 > boolean ran = false; // true if ran a task on last step
422 | ForkJoinPool p = pool;
423 | for (;;) {
424 < p.preStep(this, prevRan);
424 > p.preStep(this, ran);
425 | if (runState != 0)
426 < return;
427 < ForkJoinTask<?> t; // try to get and run stolen or submitted task
405 < if ((t = scan()) != null || (t = pollSubmission()) != null) {
406 < t.tryExec();
407 < if (base != sp)
408 < runLocalTasks();
409 < stolen = null;
410 < prevRan = ran = true;
411 < }
412 < else {
413 < prevRan = ran;
414 < ran = false;
415 < }
426 > break;
427 > ran = tryExecSteal() || tryExecSubmission();
428 | }
429 | }
430 |
431 | /**
432 < * Runs local tasks until queue is empty or shut down. Call only
433 < * while active.
432 > * Tries to steal a task and execute it.
433 > *
434 > * @return true if ran a task
435 | */
436 < private void runLocalTasks() {
437 < while (runState == 0) {
438 < ForkJoinTask<?> t = locallyFifo? locallyDeqTask() : popTask();
439 < if (t != null)
440 < t.tryExec();
441 < else if (base == sp)
442 < break;
436 > private boolean tryExecSteal() {
437 > ForkJoinTask<?> t;
438 > if ((t = scan()) != null) {
439 > t.quietlyExec();
440 > UNSAFE.putOrderedObject(this, currentStealOffset, null);
441 > if (sp != base)
442 > execLocalTasks();
443 > return true;
444 | }
445 + return false;
446 | }
447 |
448 | /**
449 < * If a submission exists, try to activate and take it
449 > * If a submission exists, try to activate and run it.
450 | *
451 < * @return a task, if available
451 > * @return true if ran a task
452 | */
453 < private ForkJoinTask<?> pollSubmission() {
453 > private boolean tryExecSubmission() {
454 | ForkJoinPool p = pool;
455 + // This loop is needed in case attempt to activate fails, in
456 + // which case we only retry if there still appears to be a
457 + // submission.
458 | while (p.hasQueuedSubmissions()) {
459 < if (active || (active = p.tryIncrementActiveCount())) {
460 < ForkJoinTask<?> t = p.pollSubmission();
461 < return t != null ? t : scan(); // if missed, rescan
459 > ForkJoinTask<?> t; int a;
460 > if (active || // inline p.tryIncrementActiveCount
461 > (active = UNSAFE.compareAndSwapInt(p, poolRunStateOffset,
462 > a = p.runState, a + 1))) {
463 > if ((t = p.pollSubmission()) != null) {
464 > UNSAFE.putOrderedObject(this, currentStealOffset, t);
465 > t.quietlyExec();
466 > UNSAFE.putOrderedObject(this, currentStealOffset, null);
467 > if (sp != base)
468 > execLocalTasks();
469 > return true;
470 > }
471 | }
472 | }
473 < return null;
473 > return false;
474 > }
475 >
476 > /**
477 > * Runs local tasks until queue is empty or shut down. Call only
478 > * while active.
479 > */
480 > private void execLocalTasks() {
481 > while (runState == 0) {
482 > ForkJoinTask<?> t = locallyFifo ? locallyDeqTask() : popTask();
483 > if (t != null)
484 > t.quietlyExec();
485 > else if (sp == base)
486 > break;
487 > }
488 | }
489 |
490 | /*
491 | * Intrinsics-based atomic writes for queue slots. These are
492 < * basically the same as methods in AtomicObjectArray, but
492 > * basically the same as methods in AtomicReferenceArray, but
493 | * specialized for (1) ForkJoinTask elements (2) requirement that
494 | * nullness and bounds checks have already been performed by
495 | * callers and (3) effective offsets are known not to overflow
496 | * from int to long (because of MAXIMUM_QUEUE_CAPACITY). We don't
497 | * need corresponding version for reads: plain array reads are OK
498 < * because they protected by other volatile reads and are
498 > * because they are protected by other volatile reads and are
499 | * confirmed by CASes.
500 | *
501 | * Most uses don't actually call these methods, but instead contain
519 | * range. This method is used only during resets and backouts.
520 | */
521 | private static final void writeSlot(ForkJoinTask<?>[] q, int i,
522 < ForkJoinTask<?> t) {
522 > ForkJoinTask<?> t) {
523 | UNSAFE.putObjectVolatile(q, (i << qShift) + qBase, t);
524 | }
525 |
552 | ForkJoinTask<?> t;
553 | ForkJoinTask<?>[] q;
554 | int b, i;
555 < if ((b = base) != sp &&
555 > if (sp != (b = base) &&
556 | (q = queue) != null && // must read q after b
557 | (t = q[i = (q.length - 1) & b]) != null && base == b &&
558 | UNSAFE.compareAndSwapObject(q, (i << qShift) + qBase, t, null)) {
564 |
565 | /**
566 | * Tries to take a task from the base of own queue. Assumes active
567 < * status. Called only by current thread.
567 > * status. Called only by this thread.
568 | *
569 | * @return a task, or null if none
570 | */
587 |
588 | /**
589 | * Returns a popped task, or null if empty. Assumes active status.
590 < * Called only by current thread. (Note: a specialization of this
550 < * code appears in popWhileJoining.)
590 > * Called only by this thread.
591 | */
592 < final ForkJoinTask<?> popTask() {
593 < int s;
594 < ForkJoinTask<?>[] q;
595 < if (base != (s = sp) && (q = queue) != null) {
596 < int i = (q.length - 1) & --s;
597 < ForkJoinTask<?> t = q[i];
598 < if (t != null && UNSAFE.compareAndSwapObject
599 < (q, (i << qShift) + qBase, t, null)) {
600 < sp = s;
601 < return t;
592 > private ForkJoinTask<?> popTask() {
593 > ForkJoinTask<?>[] q = queue;
594 > if (q != null) {
595 > int s;
596 > while ((s = sp) != base) {
597 > int i = (q.length - 1) & --s;
598 > long u = (i << qShift) + qBase; // raw offset
599 > ForkJoinTask<?> t = q[i];
600 > if (t == null) // lost to stealer
601 > break;
602 > if (UNSAFE.compareAndSwapObject(q, u, t, null)) {
603 > sp = s; // putOrderedInt may encourage more timely write
604 > // UNSAFE.putOrderedInt(this, spOffset, s);
605 > return t;
606 > }
607 | }
608 | }
609 | return null;
611 |
612 | /**
613 | * Specialized version of popTask to pop only if topmost element
614 < * is the given task. Called only by current thread while
570 < * active.
614 > * is the given task. Called only by this thread while active.
615 | *
616 | * @param t the task. Caller must ensure non-null.
617 | */
618 | final boolean unpushTask(ForkJoinTask<?> t) {
619 | int s;
620 < ForkJoinTask<?>[] q;
621 < if (base != (s = sp) && (q = queue) != null &&
620 > ForkJoinTask<?>[] q = queue;
621 > if ((s = sp) != base && q != null &&
622 | UNSAFE.compareAndSwapObject
623 | (q, (((q.length - 1) & --s) << qShift) + qBase, t, null)) {
624 < sp = s;
624 > sp = s; // putOrderedInt may encourage more timely write
625 > // UNSAFE.putOrderedInt(this, spOffset, s);
626 | return true;
627 | }
628 | return false;
629 | }
630 |
631 | /**
632 < * Returns next task or null if empty or contended
632 > * Returns next task, or null if empty or contended.
633 | */
634 | final ForkJoinTask<?> peekTask() {
635 | ForkJoinTask<?>[] q = queue;
671 | * Computes next value for random victim probe in scan(). Scans
672 | * don't require a very high quality generator, but also not a
673 | * crummy one. Marsaglia xor-shift is cheap and works well enough.
674 < * Note: This is manually inlined in scan()
674 > * Note: This is manually inlined in scan().
675 | */
676 | private static final int xorShift(int r) {
677 | r ^= r << 13;
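The diff elides the rest of xorShift at this point. For reference only, a complete Marsaglia step consistent with the manually inlined form at line 712 below would be:

    private static final int xorShift(int r) {
        r ^= r << 13;
        r ^= r >>> 17;
        return r ^ (r << 5);
    }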
710 | for (;;) {
711 | ForkJoinWorkerThread v = ws[k & mask];
712 | r ^= r << 13; r ^= r >>> 17; r ^= r << 5; // inline xorshift
713 < if (v != null && v.base != v.sp) {
714 < if (canSteal || // ensure active status
715 < (canSteal = active = p.tryIncrementActiveCount())) {
716 < int b = v.base; // inline specialized deqTask
717 < ForkJoinTask<?>[] q;
718 < if (b != v.sp && (q = v.queue) != null) {
719 < ForkJoinTask<?> t;
720 < int i = (q.length - 1) & b;
721 < long u = (i << qShift) + qBase; // raw offset
722 < if ((t = q[i]) != null && v.base == b &&
723 < UNSAFE.compareAndSwapObject(q, u, t, null)) {
724 < stolen = t;
725 < v.base = b + 1;
726 < seed = r;
727 < ++stealCount;
728 < return t;
729 < }
713 > ForkJoinTask<?>[] q; ForkJoinTask<?> t; int b, a;
714 > if (v != null && (b = v.base) != v.sp &&
715 > (q = v.queue) != null) {
716 > int i = (q.length - 1) & b;
717 > long u = (i << qShift) + qBase; // raw offset
718 > int pid = poolIndex;
719 > if ((t = q[i]) != null) {
720 > if (!canSteal && // inline p.tryIncrementActiveCount
721 > UNSAFE.compareAndSwapInt(p, poolRunStateOffset,
722 > a = p.runState, a + 1))
723 > canSteal = active = true;
724 > if (canSteal && v.base == b++ &&
725 > UNSAFE.compareAndSwapObject(q, u, t, null)) {
726 > v.base = b;
727 > v.stealHint = pid;
728 > UNSAFE.putOrderedObject(this,
729 > currentStealOffset, t);
730 > seed = r;
731 > ++stealCount;
732 > return t;
733 | }
734 | }
735 | j = -n;
749 | // Run State management
750 |
751 | // status check methods used mainly by ForkJoinPool
752 < final boolean isTerminating() { return (runState & TERMINATING) != 0; }
753 < final boolean isTerminated() { return (runState & TERMINATED) != 0; }
754 < final boolean isSuspended() { return (runState & SUSPENDED) != 0; }
755 < final boolean isTrimmed() { return (runState & TRIMMED) != 0; }
752 > final boolean isRunning() { return runState == 0; }
753 > final boolean isTerminated() { return (runState & TERMINATED) != 0; }
754 > final boolean isSuspended() { return (runState & SUSPENDED) != 0; }
755 > final boolean isTrimmed() { return (runState & TRIMMED) != 0; }
756 >
757 > final boolean isTerminating() {
758 > if ((runState & TERMINATING) != 0)
759 > return true;
760 > if (pool.isAtLeastTerminating()) { // propagate pool state
761 > shutdown();
762 > return true;
763 > }
764 > return false;
765 > }
766 |
767 | /**
768 < * Sets state to TERMINATING, also resuming if suspended.
768 > * Sets state to TERMINATING. Does NOT unpark or interrupt
769 > * to wake up if currently blocked. Callers must do so if desired.
770 | */
771 | final void shutdown() {
772 | for (;;) {
773 | int s = runState;
774 + if ((s & (TERMINATING|TERMINATED)) != 0)
775 + break;
776 | if ((s & SUSPENDED) != 0) { // kill and wakeup if suspended
777 | if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
778 | (s & ~SUSPENDED) |
779 < (TRIMMED|TERMINATING))) {
719 < LockSupport.unpark(this);
779 > (TRIMMED|TERMINATING)))
780 | break;
721 - }
781 | }
782 | else if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
783 | s | TERMINATING))
786 | }
787 |
788 | /**
789 < * Sets state to TERMINATED. Called only by this thread.
789 > * Sets state to TERMINATED. Called only by onTermination().
790 | */
791 | private void setTerminated() {
792 | int s;
796 | }
797 |
798 | /**
740 - * Instrumented version of park used by ForkJoinPool.awaitEvent
741 - */
742 - final void doPark() {
743 - ++parkCount;
744 - LockSupport.park(this);
745 - }
746 -
747 - /**
799 | * If suspended, tries to set status to unsuspended.
800 < * Caller must unpark to actually resume
800 > * Does NOT wake up if blocked.
801 | *
802 | * @return true if successful
803 | */
804 | final boolean tryUnsuspend() {
805 < int s = runState;
806 < if ((s & SUSPENDED) != 0)
807 < return UNSAFE.compareAndSwapInt(this, runStateOffset, s,
808 < s & ~SUSPENDED);
805 > int s;
806 > while (((s = runState) & SUSPENDED) != 0) {
807 > if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
808 > s & ~SUSPENDED))
809 > return true;
810 > }
811 | return false;
812 | }
813 |
814 | /**
815 < * Sets suspended status and blocks as spare until resumed,
816 < * shutdown, or timed out.
764 < *
765 < * @return false if trimmed
815 > * Sets suspended status and blocks as spare until resumed
816 > * or shutdown.
817 | */
818 < final boolean suspendAsSpare() {
819 < for (;;) { // set suspended unless terminating
818 > final void suspendAsSpare() {
819 > for (;;) { // set suspended unless terminating
820 | int s = runState;
821 | if ((s & TERMINATING) != 0) { // must kill
822 | if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
823 | s | (TRIMMED | TERMINATING)))
824 < return false;
824 > return;
825 | }
826 | else if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
827 | s | SUSPENDED))
828 | break;
829 | }
830 < boolean timed;
831 < long nanos;
781 < long startTime;
782 < if (poolIndex < pool.parallelism) {
783 < timed = false;
784 < nanos = 0L;
785 < startTime = 0L;
786 < }
787 < else {
788 < timed = true;
789 < nanos = SPARE_KEEPALIVE_NANOS;
790 < startTime = System.nanoTime();
791 < }
792 < pool.accumulateStealCount(this);
793 < lastEventCount = 0; // reset upon resume
794 < interrupted(); // clear/ignore interrupts
830 > ForkJoinPool p = pool;
831 > p.pushSpare(this);
832 | while ((runState & SUSPENDED) != 0) {
833 < ++parkCount;
834 < if (!timed)
833 > if (p.tryAccumulateStealCount(this)) {
834 > interrupted(); // clear/ignore interrupts
835 > if ((runState & SUSPENDED) == 0)
836 > break;
837 | LockSupport.park(this);
799 - else if ((nanos -= (System.nanoTime() - startTime)) > 0)
800 - LockSupport.parkNanos(this, nanos);
801 - else { // try to trim on timeout
802 - int s = runState;
803 - if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
804 - (s & ~SUSPENDED) |
805 - (TRIMMED|TERMINATING)))
806 - return false;
838 | }
839 | }
809 - return true;
840 | }
841 |
842 | // Misc support methods for ForkJoinPool
846 | * used by ForkJoinTask.
847 | */
848 | final int getQueueSize() {
849 < return -base + sp;
849 > int n; // external calls must read base first
850 > return (n = -base + sp) <= 0 ? 0 : n;
851 | }
852 |
853 | /**
855 | * thread.
856 | */
857 | final void cancelTasks() {
858 + ForkJoinTask<?> cj = currentJoin; // try to cancel ongoing tasks
859 + if (cj != null) {
860 + currentJoin = null;
861 + cj.cancelIgnoringExceptions();
862 + try {
863 + this.interrupt(); // awaken wait
864 + } catch (SecurityException ignore) {
865 + }
866 + }
867 + ForkJoinTask<?> cs = currentSteal;
868 + if (cs != null) {
869 + currentSteal = null;
870 + cs.cancelIgnoringExceptions();
871 + }
872 | while (base != sp) {
873 | ForkJoinTask<?> t = deqTask();
874 | if (t != null)
896 | // Support methods for ForkJoinTask
897 |
898 | /**
899 + * Gets and removes a local task.
900 + *
901 + * @return a task, if available
902 + */
903 + final ForkJoinTask<?> pollLocalTask() {
904 + ForkJoinPool p = pool;
905 + while (sp != base) {
906 + int a; // inline p.tryIncrementActiveCount
907 + if (active ||
908 + (active = UNSAFE.compareAndSwapInt(p, poolRunStateOffset,
909 + a = p.runState, a + 1)))
910 + return locallyFifo ? locallyDeqTask() : popTask();
911 + }
912 + return null;
913 + }
914 +
915 + /**
916 + * Gets and removes a local or stolen task.
917 + *
918 + * @return a task, if available
919 + */
920 + final ForkJoinTask<?> pollTask() {
921 + ForkJoinTask<?> t = pollLocalTask();
922 + if (t == null) {
923 + t = scan();
924 + // cannot retain/track/help steal
925 + UNSAFE.putOrderedObject(this, currentStealOffset, null);
926 + }
927 + return t;
928 + }
929 +
930 + /**
931 | * Possibly runs some tasks and/or blocks, until task is done.
932 | *
933 | * @param joinMe the task to join
934 + * @param timed true if use timed wait
935 + * @param nanos wait time if timed
936 | */
937 < final void joinTask(ForkJoinTask<?> joinMe) {
938 < ForkJoinTask<?> prevJoining = joining;
939 < joining = joinMe;
940 < while (joinMe.status >= 0) {
941 < int s = sp;
942 < if (s == base) {
943 < nonlocalJoinTask(joinMe);
944 < break;
945 < }
946 < // process local task
947 < ForkJoinTask<?> t;
948 < ForkJoinTask<?>[] q = queue;
937 > final void joinTask(ForkJoinTask<?> joinMe, boolean timed, long nanos) {
938 > // currentJoin only written by this thread; only need ordered store
939 > ForkJoinTask<?> prevJoin = currentJoin;
940 > UNSAFE.putOrderedObject(this, currentJoinOffset, joinMe);
941 > if (isTerminating()) // cancel if shutting down
942 > joinMe.cancelIgnoringExceptions();
943 > else
944 > pool.awaitJoin(joinMe, this, timed, nanos);
945 > UNSAFE.putOrderedObject(this, currentJoinOffset, prevJoin);
946 > }
947 >
948 > /**
949 > * Run tasks in local queue until given task is done.
950 > * Not currently used because it complicates semantics.
951 > *
952 > * @param joinMe the task to join
953 > */
954 > private void localHelpJoinTask(ForkJoinTask<?> joinMe) {
955 > int s;
956 > ForkJoinTask<?>[] q;
957 > while (joinMe.status >= 0 && (s = sp) != base && (q = queue) != null) {
958 | int i = (q.length - 1) & --s;
959 | long u = (i << qShift) + qBase; // raw offset
960 < if ((t = q[i]) != null &&
961 < UNSAFE.compareAndSwapObject(q, u, t, null)) {
960 > ForkJoinTask<?> t = q[i];
961 > if (t == null) // lost to a stealer
962 > break;
963 > if (UNSAFE.compareAndSwapObject(q, u, t, null)) {
964 | /*
965 < * This recheck (and similarly in nonlocalJoinTask)
965 > * This recheck (and similarly in helpJoinTask)
966 | * handles cases where joinMe is independently
967 | * cancelled or forced even though there is other work
968 | * available. Back out of the pop by putting t back
969 < * into slot before we commit by setting sp.
969 > * into slot before we commit by writing sp.
970 | */
971 | if (joinMe.status < 0) {
972 | UNSAFE.putObjectVolatile(q, u, t);
973 | break;
974 | }
975 | sp = s;
976 < t.tryExec();
976 > // UNSAFE.putOrderedInt(this, spOffset, s);
977 > t.quietlyExec();
978 | }
979 | }
889 - joining = prevJoining;
980 | }
981 |
982 | /**
983 | * Tries to locate and help perform tasks for a stealer of the
984 < * given task (or in turn one of its stealers), blocking (via
985 < * pool.tryAwaitJoin) upon failure to find work. Traces
896 < * stolen->joining links looking for a thread working on
984 > * given task, or in turn one of its stealers. Traces
985 > * currentSteal->currentJoin links looking for a thread working on
986 | * a descendant of the given task and with a non-empty queue to
987 < * steal back and execute tasks from. Inhibits mutual steal chains
988 < * and scans on outer joins upon nesting to avoid unbounded
989 < * growth. Restarts search upon encountering inconsistencies.
990 < * Tries to block if two passes agree that there are no remaining
991 < * targets.
987 > * steal back and execute tasks from.
988 > *
989 > * The implementation is very branchy to cope with potential
990 > * inconsistencies or loops encountering chains that are stale,
991 > * unknown, or of length greater than MAX_HELP_DEPTH links. All
992 > * of these cases are dealt with by just returning back to the
993 > * caller, who is expected to retry if other join mechanisms also
994 > * don't work out.
995 | *
996 | * @param joinMe the task to join
997 | */
998 < private void nonlocalJoinTask(ForkJoinTask<?> joinMe) {
999 < ForkJoinPool p = pool;
1000 < int scans = p.parallelism; // give up if too many retries
1001 < ForkJoinTask<?> bottom = null; // target seen when can't descend
1002 < restart: while (joinMe.status >= 0) {
1003 < ForkJoinTask<?> target = null;
1004 < ForkJoinTask<?> next = joinMe;
1005 < while (scans >= 0 && next != null) {
1006 < --scans;
1007 < target = next;
1008 < next = null;
1009 < ForkJoinWorkerThread v = null;
1010 < ForkJoinWorkerThread[] ws = p.workers;
1011 < int n = ws.length;
1012 < for (int j = 0; j < n; ++j) {
1013 < ForkJoinWorkerThread w = ws[j];
1014 < if (w != null && w.stolen == target) {
1015 < v = w;
1016 < break;
1017 < }
1018 < }
1019 < if (v != null && v != this) {
1020 < ForkJoinTask<?> prevStolen = stolen;
1021 < int b;
930 < ForkJoinTask<?>[] q;
931 < while ((b = v.base) != v.sp && (q = v.queue) != null) {
932 < int i = (q.length - 1) & b;
933 < long u = (i << qShift) + qBase;
934 < ForkJoinTask<?> t = q[i];
935 < if (target.status < 0)
936 < continue restart;
937 < if (t != null && v.base == b &&
938 < UNSAFE.compareAndSwapObject(q, u, t, null)) {
939 < if (joinMe.status < 0) {
940 < UNSAFE.putObjectVolatile(q, u, t);
941 < return; // back out
998 > final void helpJoinTask(ForkJoinTask<?> joinMe) {
999 > ForkJoinWorkerThread[] ws;
1000 > int n;
1001 > if (joinMe.status < 0) // already done
1002 > return;
1003 > if ((ws = pool.workers) == null || (n = ws.length) <= 1)
1004 > return; // need at least 2 workers
1005 >
1006 > ForkJoinTask<?> task = joinMe; // base of chain
1007 > ForkJoinWorkerThread thread = this; // thread with stolen task
1008 > for (int d = 0; d < MAX_HELP_DEPTH; ++d) { // chain length
1009 > // Try to find v, the stealer of task, by first using hint
1010 > ForkJoinWorkerThread v = ws[thread.stealHint & (n - 1)];
1011 > if (v == null || v.currentSteal != task) {
1012 > for (int j = 0; ; ++j) { // search array
1013 > if (j < n) {
1014 > ForkJoinTask<?> vs;
1015 > if ((v = ws[j]) != null && v != this &&
1016 > (vs = v.currentSteal) != null) {
1017 > if (joinMe.status < 0 || task.status < 0)
1018 > return; // stale or done
1019 > if (vs == task) {
1020 > thread.stealHint = j;
1021 > break; // save hint for next time
1022 | }
943 - stolen = t;
944 - v.base = b + 1;
945 - t.tryExec();
946 - stolen = prevStolen;
1023 | }
948 - if (joinMe.status < 0)
949 - return;
1024 | }
1025 < next = v.joining;
1025 > else
1026 > return; // no stealer
1027 | }
1028 < if (target.status < 0)
1029 < continue restart; // inconsistent
1030 < if (joinMe.status < 0)
1028 > }
1029 > for (;;) { // Try to help v, using specialized form of deqTask
1030 > if (joinMe.status < 0)
1031 | return;
1032 + int b = v.base;
1033 + ForkJoinTask<?>[] q = v.queue;
1034 + if (b == v.sp || q == null)
1035 + break;
1036 + int i = (q.length - 1) & b;
1037 + long u = (i << qShift) + qBase;
1038 + ForkJoinTask<?> t = q[i];
1039 + int pid = poolIndex;
1040 + ForkJoinTask<?> ps = currentSteal;
1041 + if (task.status < 0)
1042 + return; // stale or done
1043 + if (t != null && v.base == b++ &&
1044 + UNSAFE.compareAndSwapObject(q, u, t, null)) {
1045 + if (joinMe.status < 0) {
1046 + UNSAFE.putObjectVolatile(q, u, t);
1047 + return; // back out on cancel
1048 + }
1049 + v.base = b;
1050 + v.stealHint = pid;
1051 + UNSAFE.putOrderedObject(this, currentStealOffset, t);
1052 + t.quietlyExec();
1053 + UNSAFE.putOrderedObject(this, currentStealOffset, ps);
1054 + }
1055 | }
1056 <
1057 < if (bottom != target)
1058 < bottom = target; // recheck landing spot
1059 < else if (p.tryAwaitJoin(joinMe) < 0)
1060 < return; // successfully blocked
1061 < Thread.yield(); // tame spin in case too many active
1056 > // Try to descend to find v's stealer
1057 > ForkJoinTask<?> next = v.currentJoin;
1058 > if (task.status < 0 || next == null || next == task ||
1059 > joinMe.status < 0)
1060 > return;
1061 > task = next;
1062 > thread = v;
1063 | }
1064 | }
1065 |
1066 | /**
1067 + * Implements ForkJoinTask.getSurplusQueuedTaskCount().
1068 | * Returns an estimate of the number of tasks, offset by a
1069 | * function of number of idle workers.
1070 | *
1116 | }
1117 |
1118 | /**
1019 - * Gets and removes a local task.
1020 - *
1021 - * @return a task, if available
1022 - */
1023 - final ForkJoinTask<?> pollLocalTask() {
1024 - while (sp != base) {
1025 - if (active || (active = pool.tryIncrementActiveCount()))
1026 - return locallyFifo? locallyDeqTask() : popTask();
1027 - }
1028 - return null;
1029 - }
1030 -
1031 - /**
1032 - * Gets and removes a local or stolen task.
1033 - *
1034 - * @return a task, if available
1035 - */
1036 - final ForkJoinTask<?> pollTask() {
1037 - ForkJoinTask<?> t;
1038 - return (t = pollLocalTask()) != null ? t : scan();
1039 - }
1040 -
1041 - /**
1119 | * Runs tasks until {@code pool.isQuiescent()}.
1120 | */
1121 | final void helpQuiescePool() {
1122 + ForkJoinTask<?> ps = currentSteal; // to restore below
1123 | for (;;) {
1124 | ForkJoinTask<?> t = pollLocalTask();
1125 < if (t != null || (t = scan()) != null) {
1126 < t.tryExec();
1049 < stolen = null;
1050 < }
1125 > if (t != null || (t = scan()) != null)
1126 > t.quietlyExec();
1127 | else {
1128 | ForkJoinPool p = pool;
1129 + int a; // to inline CASes
1130 | if (active) {
1131 + if (!UNSAFE.compareAndSwapInt
1132 + (p, poolRunStateOffset, a = p.runState, a - 1))
1133 + continue; // retry later
1134 | active = false; // inactivate
1135 < do {} while (!p.tryDecrementActiveCount());
1135 > UNSAFE.putOrderedObject(this, currentStealOffset, ps);
1136 | }
1137 | if (p.isQuiescent()) {
1138 | active = true; // re-activate
1139 < do {} while (!p.tryIncrementActiveCount());
1139 > do {} while (!UNSAFE.compareAndSwapInt
1140 > (p, poolRunStateOffset, a = p.runState, a+1));
1141 | return;
1142 | }
1143 | }
1147 | // Unsafe mechanics
1148 |
1149 | private static final sun.misc.Unsafe UNSAFE = getUnsafe();
1150 + private static final long spOffset =
1151 + objectFieldOffset("sp", ForkJoinWorkerThread.class);
1152 | private static final long runStateOffset =
1153 | objectFieldOffset("runState", ForkJoinWorkerThread.class);
1154 + private static final long currentJoinOffset =
1155 + objectFieldOffset("currentJoin", ForkJoinWorkerThread.class);
1156 + private static final long currentStealOffset =
1157 + objectFieldOffset("currentSteal", ForkJoinWorkerThread.class);
1158 | private static final long qBase =
1159 | UNSAFE.arrayBaseOffset(ForkJoinTask[].class);
1160 + private static final long poolRunStateOffset = // to inline CAS
1161 + objectFieldOffset("runState", ForkJoinPool.class);
1162 +
1163 | private static final int qShift;
1164 |
1165 | static {
1167 | if ((s & (s-1)) != 0)
1168 | throw new Error("data type scale not a power of two");
1169 | qShift = 31 - Integer.numberOfLeadingZeros(s);
1170 + MAXIMUM_QUEUE_CAPACITY = 1 << (31 - qShift);
1171 | }
1172 |
1173 | private static long objectFieldOffset(String field, Class<?> klazz) {