     private static final Random seedGenerator = new Random();

     /**
-     * The timeout value for suspending spares. Spare workers that
-     * remain unsignalled for more than this time may be trimmed
-     * (killed and removed from pool). Since our goal is to avoid
-     * long-term thread buildup, the exact value of timeout does not
-     * matter too much so long as it avoids most false-alarm timeouts
-     * under GC stalls or momentarily high system load.
-     */
-    private static final long SPARE_KEEPALIVE_NANOS =
-        5L * 1000L * 1000L * 1000L; // 5 secs
-
-    /**
      * The maximum stolen->joining link depth allowed in helpJoinTask.
      * Depths for legitimate chains are unbounded, but we use a fixed
      * constant to avoid (otherwise unchecked) cycles and bound
...
     private static final int MAX_HELP_DEPTH = 8;

     /**
+     * The wakeup interval (in nanoseconds) for the first worker
+     * suspended as spare. On each wakeup not signalled by a
+     * resumption, it may ask the pool to reduce the number of spares.
+     */
+    private static final long TRIM_RATE_NANOS = 200L * 1000L * 1000L;
+
+    /**
      * Capacity of work-stealing queue array upon initialization.
      * Must be a power of two. Initial size must be at least 4, but is
      * padded to minimize cache effects.
...
     private static final int TRIMMED = 0x08; // killed while suspended

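Why the power-of-two constraint on the queue capacity matters: every slot lookup in this file is an AND with (length - 1) rather than a modulus, which stays correct even after the int index counters wrap. A minimal standalone illustration (not part of the patch; the class name is made up):

    public class PowerOfTwoMask {
        public static void main(String[] args) {
            int length = 1 << 4;        // any power of two
            int mask = length - 1;      // all-ones bit pattern
            int sp = Integer.MAX_VALUE; // pretend the counter is about to wrap
            // Masking picks the same slot as a (nonnegative) modulus would:
            System.out.println((sp & mask) == Math.floorMod(sp, length));         // true
            // And it keeps working after overflow, where % alone would go negative:
            System.out.println(((sp + 1) & mask) == Math.floorMod(sp + 1, length)); // true
        }
    }
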
     /**
-     * Number of LockSupport.park calls to block this thread for
-     * suspension or event waits. Used for internal instrumentation;
-     * currently not exported but included because volatile write upon
-     * park also provides a workaround for a JVM bug.
-     */
-    volatile int parkCount;
-
-    /**
      * Number of steals, transferred and reset in pool callbacks when
      * idle. Accessed directly by pool.
      */
...
     volatile long nextWaiter;

     /**
+     * Number of times this thread suspended as spare.
+     */
+    int spareCount;
+
+    /**
+     * Encoded index and count of next spare waiter. Used only
+     * by ForkJoinPool for managing spares.
+     */
+    volatile int nextSpare;
+
+    /**
      * The task currently being joined, set only when actively trying
      * to helpStealer. Written only by current thread, but read by
      * others.
...
     protected ForkJoinWorkerThread(ForkJoinPool pool) {
         this.pool = pool;
         this.locallyFifo = pool.locallyFifo;
+        setDaemon(true);
         // To avoid exposing construction details to subclasses,
         // remaining initialization is in start() and onStart()
     }
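The two-phase construction above (a nearly empty constructor, with the rest deferred to start() and onStart()) is what makes subclassing safe. A sketch of how a subclass and factory plug in through the public hooks; the class names here are invented for illustration:

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.ForkJoinWorkerThread;

    class TaggedWorkerThread extends ForkJoinWorkerThread {
        TaggedWorkerThread(ForkJoinPool pool) {
            super(pool);                     // constructor stays minimal, as above
        }
        @Override protected void onStart() {
            super.onStart();                 // framework initialization first
            setName("tagged-worker-" + getPoolIndex());
        }
        @Override protected void onTermination(Throwable exception) {
            // release per-thread resources here, then let the framework clean up
            super.onTermination(exception);
        }
    }

    class TaggedThreadFactory
            implements ForkJoinPool.ForkJoinWorkerThreadFactory {
        public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
            return new TaggedWorkerThread(pool);
        }
    }

A pool then adopts the factory at construction; the exact constructor signature varies across jsr166 revisions, e.g. new ForkJoinPool(parallelism, new TaggedThreadFactory(), null, false) in the JDK 7 form.
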
...
      */
     final void start(int poolIndex, UncaughtExceptionHandler ueh) {
         this.poolIndex = poolIndex;
-        setDaemon(true);
         if (ueh != null)
             setUncaughtExceptionHandler(ueh);
         start();
...
     protected void onTermination(Throwable exception) {
         try {
             cancelTasks();
+            while (active) // force inactive
+                active = !pool.tryDecrementActiveCount();
             setTerminated();
             pool.workerTerminated(this);
         } catch (Throwable ex) { // Shouldn't ever happen
...
      * Find and execute tasks and check status while running
      */
     private void mainLoop() {
-        int emptyScans = 0; // consecutive times failed to find work
+        int misses = 0; // track consecutive times failed to find work; max 2
         ForkJoinPool p = pool;
         for (;;) {
-            p.preStep(this, emptyScans);
+            p.preStep(this, misses);
             if (runState != 0)
-                return;
-            ForkJoinTask<?> t; // try to get and run stolen or submitted task
-            if ((t = scan()) != null || (t = pollSubmission()) != null) {
-                t.tryExec();
-                if (base != sp)
-                    runLocalTasks();
-                currentSteal = null;
-                emptyScans = 0;
-            }
-            else
-                ++emptyScans;
+                break;
+            misses = ((tryExecSteal() || tryExecSubmission()) ? 0 :
+                      (misses < 2 ? misses + 1 : 2));
         }
     }

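The reworked loop compresses all task-finding into two boolean helpers plus a saturating miss counter, so preStep only ever has to distinguish 0, 1, and 2+ consecutive empty scans. A toy, self-contained rendering of that control shape (none of these names are the pool's; the real preStep also handles inactivation and suspension):

    import java.util.concurrent.ConcurrentLinkedQueue;
    import java.util.concurrent.locks.LockSupport;

    final class MissCountingWorker implements Runnable {
        final ConcurrentLinkedQueue<Runnable> tasks;
        MissCountingWorker(ConcurrentLinkedQueue<Runnable> tasks) {
            this.tasks = tasks;
        }
        public void run() {
            int misses = 0;
            while (!Thread.currentThread().isInterrupted()) {
                if (misses == 2)                  // persistently idle: back off
                    LockSupport.parkNanos(1000L * 1000L);
                Runnable t = tasks.poll();
                if (t != null) {
                    t.run();
                    misses = 0;                   // any success resets the count
                } else if (misses < 2)
                    ++misses;                     // saturate at 2, as above
            }
        }
    }
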
     /**
-     * Runs local tasks until queue is empty or shut down. Call only
-     * while active.
+     * Try to steal a task and execute it.
+     *
+     * @return true if ran a task
      */
-    private void runLocalTasks() {
-        while (runState == 0) {
-            ForkJoinTask<?> t = locallyFifo? locallyDeqTask() : popTask();
-            if (t != null)
-                t.tryExec();
-            else if (base == sp)
-                break;
+    private boolean tryExecSteal() {
+        ForkJoinTask<?> t;
+        if ((t = scan()) != null) {
+            t.quietlyExec();
+            currentSteal = null;
+            if (sp != base)
+                execLocalTasks();
+            return true;
         }
+        return false;
     }

     /**
-     * If a submission exists, try to activate and take it
+     * If a submission exists, try to activate and run it.
      *
-     * @return a task, if available
+     * @return true if ran a task
      */
-    private ForkJoinTask<?> pollSubmission() {
+    private boolean tryExecSubmission() {
         ForkJoinPool p = pool;
         while (p.hasQueuedSubmissions()) {
+            ForkJoinTask<?> t;
             if (active || (active = p.tryIncrementActiveCount())) {
-                ForkJoinTask<?> t = p.pollSubmission();
-                if (t != null) {
+                if ((t = p.pollSubmission()) != null) {
                     currentSteal = t;
-                    return t;
+                    t.quietlyExec();
+                    currentSteal = null;
+                    if (sp != base)
+                        execLocalTasks();
+                    return true;
                 }
-                return scan(); // if missed, rescan
             }
         }
-        return null;
+        return false;
+    }
+
+    /**
+     * Runs local tasks until queue is empty or shut down. Call only
+     * while active.
+     */
+    private void execLocalTasks() {
+        while (runState == 0) {
+            ForkJoinTask<?> t = locallyFifo? locallyDeqTask() : popTask();
+            if (t != null)
+                t.quietlyExec();
+            else if (sp == base)
+                break;
+        }
     }
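execLocalTasks drains the local queue in LIFO order for ordinary recursive fork/join, or FIFO when the pool was configured with locallyFifo for async, message-passing style use. The ordering difference in isolation, with a plain Deque standing in for the lock-free array queue above:

    import java.util.ArrayDeque;
    import java.util.Deque;

    final class LocalDrainDemo {
        public static void main(String[] args) {
            for (boolean locallyFifo : new boolean[] { false, true }) {
                Deque<String> q = new ArrayDeque<String>();
                q.addLast("t1"); q.addLast("t2"); q.addLast("t3"); // pushes
                StringBuilder order = new StringBuilder();
                while (!q.isEmpty()) {
                    // popTask() takes the newest entry, locallyDeqTask() the oldest
                    String t = locallyFifo ? q.pollFirst() : q.pollLast();
                    order.append(t).append(' ');
                }
                System.out.println((locallyFifo ? "FIFO: " : "LIFO: ") + order);
            }
        }
    }

This prints "LIFO: t3 t2 t1" then "FIFO: t1 t2 t3".
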

     /*
...
         ForkJoinTask<?> t;
         ForkJoinTask<?>[] q;
         int b, i;
-        if ((b = base) != sp &&
+        if (sp != (b = base) &&
             (q = queue) != null && // must read q after b
             (t = q[i = (q.length - 1) & b]) != null && base == b &&
             UNSAFE.compareAndSwapObject(q, (i << qShift) + qBase, t, null)) {
...
      * Returns a popped task, or null if empty. Assumes active status.
      * Called only by current thread.
      */
-    final ForkJoinTask<?> popTask() {
-        int s;
-        ForkJoinTask<?>[] q;
-        if (base != (s = sp) && (q = queue) != null) {
-            int i = (q.length - 1) & --s;
-            ForkJoinTask<?> t = q[i];
-            if (t != null && UNSAFE.compareAndSwapObject
-                (q, (i << qShift) + qBase, t, null)) {
-                sp = s;
-                return t;
+    private ForkJoinTask<?> popTask() {
+        ForkJoinTask<?>[] q = queue;
+        if (q != null) {
+            int s;
+            while ((s = sp) != base) {
+                int i = (q.length - 1) & --s;
+                long u = (i << qShift) + qBase; // raw offset
+                ForkJoinTask<?> t = q[i];
+                if (t == null)   // lost to stealer
+                    break;
+                if (UNSAFE.compareAndSwapObject(q, u, t, null)) {
+                    sp = s; // putOrderedInt may encourage more timely write
+                    // UNSAFE.putOrderedInt(this, spOffset, s);
+                    return t;
+                }
             }
         }
         return null;
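Both popTask and the stealing paths resolve contention the same way: whoever CASes the slot from the task to null owns it, and the owner commits its pop by writing sp only afterwards. A compact sketch of that protocol using AtomicReferenceArray instead of Unsafe (fixed capacity, no resizing, names invented; safe for one owner thread plus any number of thieves):

    import java.util.concurrent.atomic.AtomicReferenceArray;

    final class MiniWorkQueue<T> {
        static final int MASK = 15;              // capacity 16, a power of two
        final AtomicReferenceArray<T> slots = new AtomicReferenceArray<T>(MASK + 1);
        volatile int base;                       // next slot thieves take from
        volatile int sp;                         // next free slot (owner writes)

        void push(T t) {                         // owner only
            slots.set(sp & MASK, t);
            sp = sp + 1;                         // volatile write publishes the task
        }
        T pop() {                                // owner only, LIFO end
            int s;
            while ((s = sp) != base) {
                int i = (s - 1) & MASK;
                T t = slots.get(i);
                if (t == null)                   // lost the slot to a thief
                    break;
                if (slots.compareAndSet(i, t, null)) {
                    sp = s - 1;                  // commit only after claiming
                    return t;
                }
            }
            return null;
        }
        T steal() {                              // any thief, FIFO end
            int b;
            while ((b = base) != sp) {
                int i = b & MASK;
                T t = slots.get(i);
                if (base == b && t != null && slots.compareAndSet(i, t, null)) {
                    base = b + 1;                // only the CAS winner advances
                    return t;
                }
                if (t == null && base == b)
                    break;                       // raced; give up, like scan()
            }
            return null;
        }
    }
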
...
      */
     final boolean unpushTask(ForkJoinTask<?> t) {
         int s;
-        ForkJoinTask<?>[] q;
-        if (base != (s = sp) && (q = queue) != null &&
+        ForkJoinTask<?>[] q = queue;
+        if ((s = sp) != base && q != null &&
             UNSAFE.compareAndSwapObject
             (q, (((q.length - 1) & --s) << qShift) + qBase, t, null)) {
             sp = s;
+            // UNSAFE.putOrderedInt(this, spOffset, s);
             return true;
         }
         return false;
...
                 ForkJoinWorkerThread v = ws[k & mask];
                 r ^= r << 13; r ^= r >>> 17; r ^= r << 5; // inline xorshift
                 if (v != null && v.base != v.sp) {
-                    if (canSteal ||       // ensure active status
-                        (canSteal = active = p.tryIncrementActiveCount())) {
-                        int b = v.base;   // inline specialized deqTask
-                        ForkJoinTask<?>[] q;
-                        if (b != v.sp && (q = v.queue) != null) {
-                            ForkJoinTask<?> t;
-                            int i = (q.length - 1) & b;
-                            long u = (i << qShift) + qBase; // raw offset
-                            if ((t = q[i]) != null && v.base == b &&
-                                UNSAFE.compareAndSwapObject(q, u, t, null)) {
-                                currentSteal = t;
-                                v.stealHint = poolIndex;
-                                v.base = b + 1;
-                                seed = r;
-                                ++stealCount;
-                                return t;
-                            }
+                    ForkJoinTask<?>[] q; int b;
+                    if ((canSteal ||      // ensure active status
+                         (canSteal = active = p.tryIncrementActiveCount())) &&
+                        (q = v.queue) != null && (b = v.base) != v.sp) {
+                        int i = (q.length - 1) & b;
+                        long u = (i << qShift) + qBase; // raw offset
+                        ForkJoinTask<?> t = q[i];
+                        if (v.base == b && t != null &&
+                            UNSAFE.compareAndSwapObject(q, u, t, null)) {
+                            int pid = poolIndex;
+                            currentSteal = t;
+                            v.stealHint = pid;
+                            v.base = b + 1;
+                            seed = r;
+                            ++stealCount;
+                            return t;
                         }
                     }
                     j = -n;
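The three shift-XOR lines are Marsaglia's xorshift generator, giving each worker a cheap thread-local random victim sequence with a single int of state. Isolated for clarity (the seed must be nonzero; pickVictim assumes a power-of-two table length, as the worker array has):

    final class XorShift {
        private int r;
        XorShift(int seed) { r = (seed == 0) ? 1 : seed; }
        int next() {
            r ^= r << 13;   // same triple as the inline version above
            r ^= r >>> 17;
            r ^= r << 5;
            return r;
        }
        int pickVictim(int nWorkers) {
            return next() & (nWorkers - 1);
        }
    }
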
...
     // Run State management

     // status check methods used mainly by ForkJoinPool
+    final boolean isRunning() { return runState == 0; }
     final boolean isTerminating() { return (runState & TERMINATING) != 0; }
     final boolean isTerminated() { return (runState & TERMINATED) != 0; }
     final boolean isSuspended() { return (runState & SUSPENDED) != 0; }
     final boolean isTrimmed() { return (runState & TRIMMED) != 0; }

     /**
-     * Sets state to TERMINATING, also resuming if suspended.
+     * Sets state to TERMINATING, also, unless "quiet", unparking if
+     * not already terminated.
+     *
+     * @param quiet don't unpark (used for faster status updates on
+     * pool termination)
      */
-    final void shutdown() {
+    final void shutdown(boolean quiet) {
         for (;;) {
             int s = runState;
+            if ((s & (TERMINATING|TERMINATED)) != 0)
+                break;
             if ((s & SUSPENDED) != 0) { // kill and wakeup if suspended
                 if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
                                              (s & ~SUSPENDED) |
-                                             (TRIMMED|TERMINATING))) {
-                    LockSupport.unpark(this);
+                                             (TRIMMED|TERMINATING)))
                     break;
-                }
             }
             else if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
                                               s | TERMINATING))
                 break;
         }
+        if (!quiet && (runState & TERMINATED) == 0)
+            LockSupport.unpark(this);
     }
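shutdown() is a classic CAS state machine over bit flags: snapshot the state, compute the successor, CAS, and retry on interference. The same pattern with AtomicInteger in place of Unsafe (flag values are assumptions for the demo, except TRIMMED, which is shown above):

    import java.util.concurrent.atomic.AtomicInteger;

    final class RunStateDemo {
        static final int TERMINATING = 0x01;  // assumed value
        static final int TERMINATED  = 0x02;  // assumed value
        static final int SUSPENDED   = 0x04;  // assumed value
        static final int TRIMMED     = 0x08;  // killed while suspended
        final AtomicInteger runState = new AtomicInteger();

        void shutdown() {
            for (;;) {
                int s = runState.get();
                if ((s & (TERMINATING | TERMINATED)) != 0)
                    return;                   // already shutting down
                int next = ((s & SUSPENDED) != 0)
                    ? (s & ~SUSPENDED) | TRIMMED | TERMINATING
                    : s | TERMINATING;
                if (runState.compareAndSet(s, next))
                    return;                   // transition committed
                // else another thread changed runState; snapshot again
            }
        }
    }
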

     /**
-     * Sets state to TERMINATED. Called only by this thread.
+     * Sets state to TERMINATED. Called only by onTermination().
      */
     private void setTerminated() {
         int s;
...
     }

     /**
-     * Instrumented version of park used by ForkJoinPool.eventSync
-     */
-    final void doPark() {
-        ++parkCount;
-        LockSupport.park(this);
-    }
-
-    /**
      * If suspended, tries to set status to unsuspended and unparks.
      *
      * @return true if successful
      */
-    final boolean tryResumeSpare() {
-        int s = runState;
-        if ((s & SUSPENDED) != 0 &&
-            UNSAFE.compareAndSwapInt(this, runStateOffset, s,
-                                     s & ~SUSPENDED)) {
-            LockSupport.unpark(this);
-            return true;
+    final boolean tryUnsuspend() {
+        int s;
+        while (((s = runState) & SUSPENDED) != 0) {
+            if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
+                                         s & ~SUSPENDED))
+                return true;
         }
         return false;
     }

     /**
-     * Sets suspended status and blocks as spare until resumed,
-     * shutdown, or timed out.
-     *
-     * @return false if trimmed
+     * Sets suspended status and blocks as spare until resumed
+     * or shutdown.
+     *
+     * @return true if still running on exit
      */
     final boolean suspendAsSpare() {
-        for (;;) {               // set suspended unless terminating
+        lastEventCount = 0;      // reset upon resume
+        for (;;) {               // set suspended unless terminating
             int s = runState;
             if ((s & TERMINATING) != 0) { // must kill
                 if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
...
                                               s | SUSPENDED))
                 break;
         }
-        int pc = pool.parallelism;
-        pool.accumulateStealCount(this);
-        boolean timed;
-        long nanos;
-        long startTime;
-        if (poolIndex < pc) {    // untimed wait for core threads
-            timed = false;
-            nanos = 0L;
-            startTime = 0L;
-        }
-        else {                   // timed wait for added threads
-            timed = true;
-            nanos = SPARE_KEEPALIVE_NANOS;
-            startTime = System.nanoTime();
-        }
-        lastEventCount = 0;      // reset upon resume
-        interrupted();           // clear/ignore interrupts
+        ForkJoinPool p = pool;
+        p.pushSpare(this);
         while ((runState & SUSPENDED) != 0) {
-            ++parkCount;
-            if (!timed)
+            if (!p.tryAccumulateStealCount(this))
+                continue;
+            interrupted();       // clear/ignore interrupts
+            if ((runState & SUSPENDED) == 0)
+                break;
+            if (nextSpare != 0)  // untimed
                 LockSupport.park(this);
-            else if ((nanos -= (System.nanoTime() - startTime)) > 0)
-                LockSupport.parkNanos(this, nanos);
-            else {               // try to trim on timeout
-                int s = runState;
-                if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
-                                             (s & ~SUSPENDED) |
-                                             (TRIMMED|TERMINATING)))
-                    return false;
+            else {
+                long startTime = System.nanoTime();
+                LockSupport.parkNanos(this, TRIM_RATE_NANOS);
+                if ((runState & SUSPENDED) == 0)
+                    break;
+                long now = System.nanoTime();
+                if (now - startTime >= TRIM_RATE_NANOS)
+                    pool.tryTrimSpare(now);
             }
         }
-        return true;
+        return runState == 0;
     }
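The timed branch shows the standard parkNanos idiom: the call may return early for any reason (resume, interrupt, spurious wakeup), so the waiter rechecks its condition and acts on timeout only when the full interval genuinely elapsed. That idiom in isolation (tryTrim is a hypothetical stand-in for pool.tryTrimSpare, and the field here is a plain flag rather than the runState bits):

    import java.util.concurrent.locks.LockSupport;

    final class SpareWaiter {
        static final long TRIM_RATE_NANOS = 200L * 1000L * 1000L;
        volatile boolean suspended = true;

        void awaitResumeOrTrim() {
            while (suspended) {
                long startTime = System.nanoTime();
                LockSupport.parkNanos(this, TRIM_RATE_NANOS);
                if (!suspended)
                    break;                   // resumed while parked
                long now = System.nanoTime();
                if (now - startTime >= TRIM_RATE_NANOS)
                    tryTrim(now);            // real timeout, not a spurious wakeup
            }
        }
        void tryTrim(long now) {
            // hypothetical hook: ask a controller whether to retire this waiter
        }
    }
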

     // Misc support methods for ForkJoinPool
...
      * used by ForkJoinTask.
      */
     final int getQueueSize() {
-        return -base + sp;
+        int n; // external calls must read base first
+        return (n = -base + sp) <= 0 ? 0 : n;
     }

     /**
...
      * thread.
      */
     final void cancelTasks() {
-        ForkJoinTask<?> cj = currentJoin; // try to kill live tasks
+        ForkJoinTask<?> cj = currentJoin; // try to cancel ongoing tasks
         if (cj != null) {
             currentJoin = null;
             cj.cancelIgnoringExceptions();
+            try {
+                this.interrupt(); // awaken wait
+            } catch (SecurityException ignore) {
+            }
         }
         ForkJoinTask<?> cs = currentSteal;
         if (cs != null) {
...
         ForkJoinTask<?> t = pollLocalTask();
         if (t == null) {
             t = scan();
-            currentSteal = null; // cannot retain/track
+            currentSteal = null; // cannot retain/track/help
         }
         return t;
     }

     /**
      * Possibly runs some tasks and/or blocks, until task is done.
-     * The main body is basically a big spinloop, alternating between
-     * calls to helpJoinTask and pool.tryAwaitJoin with increased
-     * patience parameters until either the task is done without
-     * waiting, or we have, if necessary, created or resumed a
-     * replacement for this thread while it blocks.
      *
      * @param joinMe the task to join
-     * @return task status on exit
      */
-    final int joinTask(ForkJoinTask<?> joinMe) {
-        int stat;
+    final void joinTask(ForkJoinTask<?> joinMe) {
+        // currentJoin only written by this thread; only need ordered store
         ForkJoinTask<?> prevJoin = currentJoin;
-        // Only written by this thread; only need ordered store
         UNSAFE.putOrderedObject(this, currentJoinOffset, joinMe);
-        if ((stat = joinMe.status) >= 0 &&
-            (sp == base || (stat = localHelpJoinTask(joinMe)) >= 0)) {
-            for (int retries = 0; ; ++retries) {
-                helpJoinTask(joinMe, retries);
-                if ((stat = joinMe.status) < 0)
-                    break;
-                pool.tryAwaitJoin(joinMe, retries);
-                if ((stat = joinMe.status) < 0)
-                    break;
-                Thread.yield(); // tame unbounded loop
-            }
-        }
+        if (sp != base)
+            localHelpJoinTask(joinMe);
+        if (joinMe.status >= 0)
+            pool.awaitJoin(joinMe, this);
         UNSAFE.putOrderedObject(this, currentJoinOffset, prevJoin);
-        return stat;
     }
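The ordered stores are the Unsafe spelling of lazySet: a write that cannot float above earlier writes but skips the full volatile fence, which suffices because currentJoin has exactly one writer and only advisory readers. The same publish/restore shape with AtomicReference (names illustrative):

    import java.util.concurrent.atomic.AtomicReference;

    final class OrderedStoreDemo {
        // Stand-in for the volatile currentJoin field; single-writer discipline.
        final AtomicReference<Object> currentJoin = new AtomicReference<Object>();

        void joinTask(Object joinMe) {
            Object prevJoin = currentJoin.get();
            currentJoin.lazySet(joinMe);       // cheap publish to helper threads
            try {
                // ... help run tasks and/or block until joinMe completes ...
            } finally {
                currentJoin.lazySet(prevJoin); // restore previous join target
            }
        }
    }
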

     /**
      * Run tasks in local queue until given task is done.
      *
      * @param joinMe the task to join
-     * @return task status on exit
      */
-    private int localHelpJoinTask(ForkJoinTask<?> joinMe) {
-        int stat, s;
+    private void localHelpJoinTask(ForkJoinTask<?> joinMe) {
+        int s;
         ForkJoinTask<?>[] q;
-        while ((stat = joinMe.status) >= 0 &&
-               base != (s = sp) && (q = queue) != null) {
-            ForkJoinTask<?> t;
+        while (joinMe.status >= 0 && (s = sp) != base && (q = queue) != null) {
             int i = (q.length - 1) & --s;
             long u = (i << qShift) + qBase; // raw offset
-            if ((t = q[i]) != null &&
-                UNSAFE.compareAndSwapObject(q, u, t, null)) {
+            ForkJoinTask<?> t = q[i];
+            if (t == null) // lost to a stealer
+                break;
+            if (UNSAFE.compareAndSwapObject(q, u, t, null)) {
                 /*
                  * This recheck (and similarly in helpJoinTask)
                  * handles cases where joinMe is independently
...
                  * available. Back out of the pop by putting t back
                  * into slot before we commit by writing sp.
                  */
-                if ((stat = joinMe.status) < 0) {
+                if (joinMe.status < 0) {
                     UNSAFE.putObjectVolatile(q, u, t);
                     break;
                 }
                 sp = s;
-                t.tryExec();
+                // UNSAFE.putOrderedInt(this, spOffset, s);
+                t.quietlyExec();
             }
         }
-        return stat;
     }

     /**
...
      * given task, or in turn one of its stealers. Traces
      * currentSteal->currentJoin links looking for a thread working on
      * a descendant of the given task and with a non-empty queue to
-     * steal back and execute tasks from. Restarts search upon
-     * encountering chains that are stale, unknown, or of length
-     * greater than MAX_HELP_DEPTH links, to avoid unbounded cycles.
-     *
-     * The implementation is very branchy to cope with the restart
-     * cases. Returns void, not task status (which must be reread by
-     * caller anyway) to slightly simplify control paths.
+     * steal back and execute tasks from.
+     *
+     * The implementation is very branchy to cope with the potential
+     * inconsistencies or loops encountering chains that are stale,
+     * unknown, or of length greater than MAX_HELP_DEPTH links. All
+     * of these cases are dealt with by just returning back to the
+     * caller, who is expected to retry if other join mechanisms also
+     * don't work out.
      *
      * @param joinMe the task to join
-     * @param rescans the number of times to recheck for work
      */
-    private void helpJoinTask(ForkJoinTask<?> joinMe, int rescans) {
+    final void helpJoinTask(ForkJoinTask<?> joinMe) {
         ForkJoinWorkerThread[] ws = pool.workers;
-        int n;
-        if (ws == null || (n = ws.length) <= 1)
-            return;                          // need at least 2 workers
-        restart:while (rescans-- >= 0 && joinMe.status >= 0) {
+        int n;                               // need at least 2 workers
+        if (ws != null && (n = ws.length) > 1 && joinMe.status >= 0) {
             ForkJoinTask<?> task = joinMe;       // base of chain
             ForkJoinWorkerThread thread = this;  // thread with stolen task
-            for (int depth = 0; depth < MAX_HELP_DEPTH; ++depth) {
+            for (int d = 0; d < MAX_HELP_DEPTH; ++d) { // chain length
                 // Try to find v, the stealer of task, by first using hint
                 ForkJoinWorkerThread v = ws[thread.stealHint & (n - 1)];
                 if (v == null || v.currentSteal != task) {
                     for (int j = 0; ; ++j) {     // search array
-                        if (task.status < 0 || j == n)
-                            continue restart;    // stale or no stealer
-                        if ((v = ws[j]) != null && v.currentSteal == task) {
-                            thread.stealHint = j; // save for next time
-                            break;
+                        if (j < n) {
+                            if ((v = ws[j]) != null) {
+                                if (task.status < 0)
+                                    return;      // stale or done
+                                if (v.currentSteal == task) {
+                                    thread.stealHint = j;
+                                    break;       // save hint for next time
+                                }
+                            }
                         }
+                        else
+                            return;              // no stealer
                     }
                 }
                 // Try to help v, using specialized form of deqTask
...
                     int i = (q.length - 1) & b;
                     long u = (i << qShift) + qBase;
                     ForkJoinTask<?> t = q[i];
-                    if (task.status < 0)         // stale
-                        continue restart;
-                    if (t != null) {
-                        if (v.base == b &&
-                            UNSAFE.compareAndSwapObject(q, u, t, null)) {
+                    if (task.status < 0)
+                        return;                  // stale or done
+                    if (v.base == b) {
+                        if (t == null)
+                            return;              // producer stalled
+                        if (UNSAFE.compareAndSwapObject(q, u, t, null)) {
                             if (joinMe.status < 0) {
                                 UNSAFE.putObjectVolatile(q, u, t);
                                 return;          // back out on cancel
                             }
+                            int pid = poolIndex;
                             ForkJoinTask<?> prevSteal = currentSteal;
                             currentSteal = t;
-                            v.stealHint = poolIndex;
+                            v.stealHint = pid;
                             v.base = b + 1;
-                            t.tryExec();
+                            t.quietlyExec();
                             currentSteal = prevSteal;
                         }
                     }
-                    else if (v.base == b)        // producer stalled
-                        continue restart;        // retry via restart
                     if (joinMe.status < 0)
                         return;
                 }
                 // Try to descend to find v's stealer
                 ForkJoinTask<?> next = v.currentJoin;
-                if (next == null || next == task || task.status < 0)
-                    continue restart;            // no descendent or stale
-                if (joinMe.status < 0)
+                if (task.status < 0 || next == null || next == task ||
+                    joinMe.status < 0)
                     return;
                 task = next;
                 thread = v;
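The chain walk is the heart of helping: find who stole the task you are joining, run tasks from their queue, and if they are themselves blocked on a join, descend to that task, for at most MAX_HELP_DEPTH hops. Stripped of the queue mechanics (Worker is a toy stand-in; the real code also re-verifies staleness at each step, as above):

    final class HelpChainDemo {
        static final int MAX_HELP_DEPTH = 8;

        static final class Worker {
            volatile Object currentSteal;  // task this worker took from another
            volatile Object currentJoin;   // task this worker is waiting on
        }

        static Worker findStealer(Worker[] ws, Object task) {
            for (Worker w : ws)
                if (w != null && w.currentSteal == task)
                    return w;
            return null;                   // unknown/stale link: caller gives up
        }

        static void helpChain(Worker[] ws, Object joinMe) {
            Object task = joinMe;          // base of chain
            for (int d = 0; d < MAX_HELP_DEPTH; ++d) {
                Worker v = findStealer(ws, task);
                if (v == null)
                    return;                // no stealer found; let caller retry
                // ... here the real code steals back and runs tasks from v ...
                Object next = v.currentJoin;
                if (next == null || next == task)
                    return;                // v is not blocked, or link is cyclic
                task = next;               // descend: help v's own join
            }
            // depth bound hit: possibly a cycle; return to caller
        }
    }
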
...
         for (;;) {
             ForkJoinTask<?> t = pollLocalTask();
             if (t != null || (t = scan()) != null) {
-                t.tryExec();
+                t.quietlyExec();
                 currentSteal = null;
             }
             else {
                 ForkJoinPool p = pool;
                 if (active) {
+                    if (!p.tryDecrementActiveCount())
+                        continue;   // retry later
                     active = false; // inactivate
-                    do {} while (!p.tryDecrementActiveCount());
                 }
                 if (p.isQuiescent()) {
                     active = true; // re-activate
...
     // Unsafe mechanics

     private static final sun.misc.Unsafe UNSAFE = getUnsafe();
+    private static final long spOffset =
+        objectFieldOffset("sp", ForkJoinWorkerThread.class);
     private static final long runStateOffset =
         objectFieldOffset("runState", ForkJoinWorkerThread.class);
     private static final long currentJoinOffset =
         objectFieldOffset("currentJoin", ForkJoinWorkerThread.class);
+    private static final long currentStealOffset =
+        objectFieldOffset("currentSteal", ForkJoinWorkerThread.class);
     private static final long qBase =
         UNSAFE.arrayBaseOffset(ForkJoinTask[].class);
+
     private static final int qShift;

     static {
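For readers without sun.misc.Unsafe at hand: each of these offsets pairs with the compareAndSwapInt/compareAndSwapObject calls earlier in the file, and the portable equivalent is a reflective field updater. A minimal sketch of the same runState CAS without Unsafe (class and method names invented), presumably avoided in the real code because updaters carried extra overhead on contemporary VMs:

    import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;

    final class UpdaterDemo {
        volatile int runState;

        static final AtomicIntegerFieldUpdater<UpdaterDemo> RUN_STATE =
            AtomicIntegerFieldUpdater.newUpdater(UpdaterDemo.class, "runState");

        boolean transition(int expect, int update) {
            // same effect as UNSAFE.compareAndSwapInt(this, runStateOffset, ...)
            return RUN_STATE.compareAndSet(this, expect, update);
        }
    }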