   6
   7     package jsr166y;
   8
   9  -  import java.util.concurrent.*;
   9     import java.util.ArrayList;
  10     import java.util.Arrays;
  11     import java.util.Collection;
  12     import java.util.Collections;
  13     import java.util.List;
  14  +  import java.util.concurrent.AbstractExecutorService;
  15  +  import java.util.concurrent.Callable;
  16  +  import java.util.concurrent.ExecutorService;
  17  +  import java.util.concurrent.Future;
  18  +  import java.util.concurrent.RejectedExecutionException;
  19  +  import java.util.concurrent.RunnableFuture;
  20  +  import java.util.concurrent.TimeUnit;
  21  +  import java.util.concurrent.TimeoutException;
  22  +  import java.util.concurrent.atomic.AtomicInteger;
  23     import java.util.concurrent.locks.LockSupport;
  24     import java.util.concurrent.locks.ReentrantLock;
  17  -  import java.util.concurrent.atomic.AtomicInteger;
  18  -  import java.util.concurrent.CountDownLatch;
  25
  26     /**
  27      * An {@link ExecutorService} for running {@link ForkJoinTask}s.
 435
 436     /**
 437      * The wakeup interval (in nanoseconds) for the oldest worker
 438  <   * waiting for an event invokes tryShutdownUnusedWorker to shrink
 439  <   * the number of workers. The exact value does not matter too
 440  <   * much, but should be long enough to slowly release resources
 441  <   * during long periods without use without disrupting normal use.
 438  >   * waiting for an event to invoke tryShutdownUnusedWorker to
 439  >   * shrink the number of workers. The exact value does not matter
 440  >   * too much. It must be short enough to release resources during
 441  >   * sustained periods of idleness, but not so short that threads
 442  >   * are continually re-created.
 443      */
 444     private static final long SHRINK_RATE_NANOS =
 445         30L * 1000L * 1000L * 1000L; // 2 per minute
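The timed-park pattern this constant drives appears below in eventSync; as a standalone illustration, a minimal sketch under assumed names (hypothetical class and method, not pool code):

    import java.util.concurrent.locks.LockSupport;

    // Sketch: an idle thread parks for at most SHRINK_RATE_NANOS; if the full
    // interval elapses with no unpark (i.e., no event), the caller may retire
    // one worker, matching the tryShutdownUnusedWorker call later in this diff.
    class ShrinkRateSketch {
        static final long SHRINK_RATE_NANOS = 30L * 1000L * 1000L * 1000L;

        // Returns true when a full quiet interval passed and a trim is due.
        static boolean quietIntervalElapsed() {
            long startTime = System.nanoTime();
            LockSupport.parkNanos(SHRINK_RATE_NANOS); // returns early if unparked
            return System.nanoTime() - startTime >= SHRINK_RATE_NANOS;
        }
    }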
 496      */
 497     private volatile long eventWaiters;
 498
 499  <  private static final int EVENT_COUNT_SHIFT = 32;
 500  <  private static final long WAITER_ID_MASK = (1L << 16) - 1L;
 499  >  private static final int EVENT_COUNT_SHIFT = 32;
 500  >  private static final int WAITER_ID_MASK = (1 << 16) - 1;
 501
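The encoding behind these constants can be exercised in isolation; the switch from a long mask to an int mask is also why later hunks rewrite `(int)(h & WAITER_ID_MASK)` as `((int)h) & WAITER_ID_MASK`. A sketch using the same shift and mask (helper names are mine, not the class's):

    // eventWaiters packs an event-count snapshot (high 32 bits) with a waiter
    // id (low 16 bits, worker index + 1, so 0 means "no waiter").
    class EventWaitersSketch {
        static final int EVENT_COUNT_SHIFT = 32;
        static final int WAITER_ID_MASK = (1 << 16) - 1;

        static long pack(int eventCount, int poolIndex) {
            return (((long) eventCount) << EVENT_COUNT_SHIFT) | (long) (poolIndex + 1);
        }
        static int waiterIndex(long h) { return (((int) h) & WAITER_ID_MASK) - 1; }
        static int countSnapshot(long h) { return (int) (h >>> EVENT_COUNT_SHIFT); }

        public static void main(String[] args) {
            long h = pack(7, 3);
            System.out.println(waiterIndex(h) + ", " + countSnapshot(h)); // 3, 7
        }
    }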
 502     /**
 503      * A counter for events that may wake up worker threads:
 522      * Lifecycle control. The low word contains the number of workers
 523      * that are (probably) executing tasks. This value is atomically
 524      * incremented before a worker gets a task to run, and decremented
 525  <   * when worker has no tasks and cannot find any. Bits 16-18
 525  >   * when a worker has no tasks and cannot find any. Bits 16-18
 526      * contain runLevel value. When all are zero, the pool is
 527      * running. Level transitions are monotonic (running -> shutdown
 528      * -> terminating -> terminated) so each transition adds a bit.
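A sketch of the level scheme the comment describes, with bits 16-18 layered above the 16-bit count (the bit positions follow the comment; the constant names are assumptions):

    class RunLevelSketch {
        static final int ACTIVE_COUNT_MASK = (1 << 16) - 1; // low word: count
        static final int SHUTDOWN          = 1 << 16;
        static final int TERMINATING       = 1 << 17;
        static final int TERMINATED        = 1 << 18;

        // Monotonic: each transition ORs in one more bit and never clears any,
        // so running -> shutdown -> terminating -> terminated cannot regress.
        static int advanceRunLevel(int runState, int level) {
            return runState | level;
        }
    }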
 596     }
 597
 598     /**
 599  +   * Tries to increment running count part of workerCounts
 600  +   */
 601  +  final boolean tryIncrementRunningCount() {
 602  +      int c;
 603  +      return UNSAFE.compareAndSwapInt(this, workerCountsOffset,
 604  +                                      c = workerCounts,
 605  +                                      c + ONE_RUNNING);
 606  +  }
 607  +
 608  +  /**
 609      * Tries to decrement running count unless already zero
 610      */
 611     final boolean tryDecrementRunningCount() {
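The new tryIncrementRunningCount is a single-shot CAS that may fail under contention; a caller that must not lose the update spins on it. An AtomicInteger stand-in for the Unsafe-based version (sketch only, under assumed semantics of workerCounts):

    import java.util.concurrent.atomic.AtomicInteger;

    class RunningCountSketch {
        static final int ONE_RUNNING = 1; // running count lives in the low bits
        final AtomicInteger workerCounts = new AtomicInteger();

        boolean tryIncrementRunningCount() {      // single attempt, may fail
            int c = workerCounts.get();
            return workerCounts.compareAndSet(c, c + ONE_RUNNING);
        }

        void incrementRunningCount() {            // caller-side retry idiom
            do {} while (!tryIncrementRunningCount());
        }
    }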
 621      * (rarely) necessary when other count updates lag.
 622      *
 623      * @param dr -- either zero or ONE_RUNNING
 624  <   * @param dt == either zero or ONE_TOTAL
 624  >   * @param dt -- either zero or ONE_TOTAL
 625      */
 626     private void decrementWorkerCounts(int dr, int dt) {
 627         for (;;) {
 679             for (k = 0; k < n && ws[k] != null; ++k)
 680                 ;
 681             if (k == n)
 682  <              ws = Arrays.copyOf(ws, n << 1);
 682  >              ws = workers = Arrays.copyOf(ws, n << 1);
 683         }
 684         ws[k] = w;
 685  <      workers = ws; // volatile array write ensures slot visibility
 685  >      int c = eventCount; // advance event count to ensure visibility
 686  >      UNSAFE.compareAndSwapInt(this, eventCountOffset, c, c+1);
 687     } finally {
 688         lock.unlock();
 689     }
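Both sides of this hunk lean on the same memory-model rule: writes made before a volatile write (or a successful CAS on a volatile) become visible to any thread that subsequently reads that volatile. A minimal sketch of the publication idiom, with hypothetical fields:

    class PublicationSketch {
        final Object[] slots = new Object[16]; // plain, non-volatile writes
        volatile int stamp;                    // publication point

        void publish(int k, Object w) {
            slots[k] = w;  // plain write...
            stamp++;       // ...published by this volatile write (visibility only)
        }

        Object read(int k) {
            int s = stamp; // volatile read first; its value is irrelevant here
            return slots[k];
        }
    }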
 716      */
 717     final void workerTerminated(ForkJoinWorkerThread w) {
 718         forgetWorker(w);
 719  <      decrementWorkerCounts(w.isTrimmed()? 0 : ONE_RUNNING, ONE_TOTAL);
 719  >      decrementWorkerCounts(w.isTrimmed() ? 0 : ONE_RUNNING, ONE_TOTAL);
 720         while (w.stealCount != 0) // collect final count
 721             tryAccumulateStealCount(w);
 722         tryTerminate(false);
 737         int ec = eventCount;
 738         boolean releasedOne = false;
 739         ForkJoinWorkerThread w; int id;
 740  <      while ((id = ((int)(h & WAITER_ID_MASK)) - 1) >= 0 &&
 740  >      while ((id = (((int)h) & WAITER_ID_MASK) - 1) >= 0 &&
 741                (int)(h >>> EVENT_COUNT_SHIFT) != ec &&
 742                id < n && (w = ws[id]) != null) {
 743             if (UNSAFE.compareAndSwapLong(this, eventWaitersOffset,
 775         long nh = (((long)ec) << EVENT_COUNT_SHIFT) | ((long)(w.poolIndex+1));
 776         long h;
 777         while ((runState < SHUTDOWN || !tryTerminate(false)) &&
 778  <             (((int)((h = eventWaiters) & WAITER_ID_MASK)) == 0 ||
 778  >             (((int)(h = eventWaiters) & WAITER_ID_MASK) == 0 ||
 779                 (int)(h >>> EVENT_COUNT_SHIFT) == ec) &&
 780                eventCount == ec) {
 781             if (UNSAFE.compareAndSwapLong(this, eventWaitersOffset,
 802         if (tryAccumulateStealCount(w)) { // transfer while idle
 803             boolean untimed = (w.nextWaiter != 0L ||
 804                                (workerCounts & RUNNING_COUNT_MASK) <= 1);
 805  <          long startTime = untimed? 0 : System.nanoTime();
 805  >          long startTime = untimed ? 0 : System.nanoTime();
 806             Thread.interrupted(); // clear/ignore interrupt
 807  <          if (eventCount != ec || w.runState != 0 ||
 808  <              runState >= TERMINATING) // recheck after clear
 791  <              break;
 807  >          if (w.isTerminating() || eventCount != ec)
 808  >              break; // recheck after clear
 809             if (untimed)
 810                 LockSupport.park(w);
 811             else {
 812                 LockSupport.parkNanos(w, SHRINK_RATE_NANOS);
 813  <              if (eventCount != ec || w.runState != 0 ||
 797  <                  runState >= TERMINATING)
 813  >              if (eventCount != ec || w.isTerminating())
 814                     break;
 815                 if (System.nanoTime() - startTime >= SHRINK_RATE_NANOS)
 816                     tryShutdownUnusedWorker(ec);
 842         if ((sw = spareWaiters) != 0 &&
 843             (id = (sw & SPARE_ID_MASK) - 1) >= 0 &&
 844             id < n && (w = ws[id]) != null &&
 845  <          (workerCounts & RUNNING_COUNT_MASK) < parallelism &&
 845  >          (runState >= TERMINATING ||
 846  >           (workerCounts & RUNNING_COUNT_MASK) < parallelism) &&
 847             spareWaiters == sw &&
 848             UNSAFE.compareAndSwapInt(this, spareWaitersOffset,
 849                                      sw, w.nextSpare)) {
 879             UNSAFE.compareAndSwapInt(this, workerCountsOffset, wc,
 880                                      wc + (ONE_RUNNING|ONE_TOTAL))) {
 881             ForkJoinWorkerThread w = null;
 882  +          Throwable fail = null;
 883             try {
 884                 w = factory.newThread(this);
 885  <          } finally { // adjust on null or exceptional factory return
 886  <              if (w == null) {
 869  <                  decrementWorkerCounts(ONE_RUNNING, ONE_TOTAL);
 870  <                  tryTerminate(false); // handle failure during shutdown
 871  <              }
 885  >          } catch (Throwable ex) {
 886  >              fail = ex;
 887             }
 888  <          if (w == null)
 888  >          if (w == null) { // null or exceptional factory return
 889  >              decrementWorkerCounts(ONE_RUNNING, ONE_TOTAL);
 890  >              tryTerminate(false); // handle failure during shutdown
 891  >              // If originating from an external caller,
 892  >              // propagate exception, else ignore
 893  >              if (fail != null && runState < TERMINATING &&
 894  >                  !(Thread.currentThread() instanceof
 895  >                    ForkJoinWorkerThread))
 896  >                  UNSAFE.throwException(fail);
 897                 break;
 898  +          }
 899             w.start(recordWorker(w), ueh);
 900  <          if ((workerCounts >>> TOTAL_COUNT_SHIFT) >= pc) {
 877  <              int c; // advance event count
 878  <              UNSAFE.compareAndSwapInt(this, eventCountOffset,
 879  <                                       c = eventCount, c+1);
 900  >          if ((workerCounts >>> TOTAL_COUNT_SHIFT) >= pc)
 901                 break; // add at most one unless total below target
 881  -          }
 902         }
 903     }
 904     if (eventWaiters != 0L)
 934         }
 935         else if ((h = eventWaiters) != 0L) {
 936             long nh;
 937  <          int id = ((int)(h & WAITER_ID_MASK)) - 1;
 937  >          int id = (((int)h) & WAITER_ID_MASK) - 1;
 938             if (id >= 0 && id < n && (w = ws[id]) != null &&
 939                 (nh = w.nextWaiter) != 0L && // keep at least one worker
 940                 UNSAFE.compareAndSwapLong(this, eventWaitersOffset, h, nh))
 980         boolean active = w.active;
 981         boolean inactivate = false;
 982         int pc = parallelism;
 983  <      int rs;
 984  <      while (w.runState == 0 && (rs = runState) < TERMINATING) {
 983  >      while (w.runState == 0) {
 984  >          int rs = runState;
 985  >          if (rs >= TERMINATING) { // propagate shutdown
 986  >              w.shutdown();
 987  >              break;
 988  >          }
 989             if ((inactivate || (active && (rs & ACTIVE_COUNT_MASK) >= pc)) &&
 990  <              UNSAFE.compareAndSwapInt(this, runStateOffset, rs, rs - 1))
 990  >              UNSAFE.compareAndSwapInt(this, runStateOffset, rs, --rs)) {
 991                 inactivate = active = w.active = false;
 992  <          int wc = workerCounts;
 992  >              if (rs == SHUTDOWN) { // all inactive and shut down
 993  >                  tryTerminate(false);
 994  >                  continue;
 995  >              }
 996  >          }
 997  >          int wc = workerCounts; // try to suspend as spare
 998             if ((wc & RUNNING_COUNT_MASK) > pc) {
 999                 if (!(inactivate |= active) && // must inactivate to suspend
1000  <              workerCounts == wc && // try to suspend as spare
1000  >                  workerCounts == wc &&
1001                     UNSAFE.compareAndSwapInt(this, workerCountsOffset,
1002                                              wc, wc - ONE_RUNNING))
1003                     w.suspendAsSpare();
1004             }
1005             else if ((wc >>> TOTAL_COUNT_SHIFT) < pc)
1006                 helpMaintainParallelism(); // not enough workers
1007  <          else if (!ran) {
1007  >          else if (ran)
1008  >              break;
1009  >          else {
1010                 long h = eventWaiters;
1011                 int ec = eventCount;
1012                 if (h != 0L && (int)(h >>> EVENT_COUNT_SHIFT) != ec)
1018                 else if (!(inactivate |= active))
1019                     eventSync(w, wec); // must inactivate before sync
1020             }
 990  -          else
 991  -              break;
1021         }
1022     }
1023
1027      *
1028      * @param joinMe the task to join
1029      * @param worker the current worker thread
1030  +   * @param timed true if wait should time out
1031  +   * @param nanos timeout value if timed
1032      */
1033  <  final void awaitJoin(ForkJoinTask<?> joinMe, ForkJoinWorkerThread worker) {
1033  >  final void awaitJoin(ForkJoinTask<?> joinMe, ForkJoinWorkerThread worker,
1034  >                       boolean timed, long nanos) {
1035  >      long startTime = timed? System.nanoTime() : 0L;
1036         int retries = 2 + (parallelism >> 2); // #helpJoins before blocking
1037  +      boolean running = true; // false when count decremented
1038         while (joinMe.status >= 0) {
1039  <          int wc;
1040  <          worker.helpJoinTask(joinMe);
1039  >          if (runState >= TERMINATING) {
1040  >              joinMe.cancelIgnoringExceptions();
1041  >              break;
1042  >          }
1043  >          running = worker.helpJoinTask(joinMe, running);
1044             if (joinMe.status < 0)
1045                 break;
1046  <          else if (retries > 0)
1046  >          if (retries > 0) {
1047                 --retries;
1048  <          else if (((wc = workerCounts) & RUNNING_COUNT_MASK) != 0 &&
1049  <              UNSAFE.compareAndSwapInt(this, workerCountsOffset,
1050  <                                       wc, wc - ONE_RUNNING)) {
1051  <              int stat, c; long h;
1052  <              while ((stat = joinMe.status) >= 0 &&
1053  <                     (h = eventWaiters) != 0L && // help release others
1054  <                     (int)(h >>> EVENT_COUNT_SHIFT) != eventCount)
1048  >              continue;
1049  >          }
1050  >          int wc = workerCounts;
1051  >          if ((wc & RUNNING_COUNT_MASK) != 0) {
1052  >              if (running) {
1053  >                  if (!UNSAFE.compareAndSwapInt(this, workerCountsOffset,
1054  >                                                wc, wc - ONE_RUNNING))
1055  >                      continue;
1056  >                  running = false;
1057  >              }
1058  >              long h = eventWaiters;
1059  >              if (h != 0L && (int)(h >>> EVENT_COUNT_SHIFT) != eventCount)
1060                     releaseEventWaiters();
1061  <          if (stat >= 0 &&
1062  <              ((workerCounts & RUNNING_COUNT_MASK) == 0 ||
1063  <               (stat =
1064  <                joinMe.internalAwaitDone(JOIN_TIMEOUT_MILLIS)) >= 0))
1065  <              helpMaintainParallelism(); // timeout or no running workers
1066  <          do {} while (!UNSAFE.compareAndSwapInt
1067  <                       (this, workerCountsOffset,
1068  <                        c = workerCounts, c + ONE_RUNNING));
1069  <          if (stat < 0)
1070  <              break; // else restart
1061  >              if (joinMe.status < 0)
1062  >                  break;
1063  >              if ((workerCounts & RUNNING_COUNT_MASK) != 0) {
1064  >                  long ms; int ns;
1065  >                  if (!timed) {
1066  >                      ms = JOIN_TIMEOUT_MILLIS;
1067  >                      ns = 0;
1068  >                  }
1069  >                  else { // at most JOIN_TIMEOUT_MILLIS per wait
1070  >                      long nt = nanos - (System.nanoTime() - startTime);
1071  >                      if (nt <= 0L)
1072  >                          break;
1073  >                      ms = nt / 1000000;
1074  >                      if (ms > JOIN_TIMEOUT_MILLIS) {
1075  >                          ms = JOIN_TIMEOUT_MILLIS;
1076  >                          ns = 0;
1077  >                      }
1078  >                      else
1079  >                          ns = (int) (nt % 1000000);
1080  >                  }
1081  >                  if (joinMe.internalAwaitDone(ms, ns) < 0)
1082  >                      break;
1083  >              }
1084             }
1085  +          helpMaintainParallelism();
1086  +      }
1087  +      if (!running) {
1088  +          int c;
1089  +          do {} while (!UNSAFE.compareAndSwapInt
1090  +                       (this, workerCountsOffset,
1091  +                        c = workerCounts, c + ONE_RUNNING));
1092         }
1093     }
1094
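The remaining-time arithmetic in the new timed path splits nanoseconds into the (millis, nanos) pair that Object.wait-style timed waits expect, clamped to one JOIN_TIMEOUT_MILLIS interval per iteration. The conversion in isolation (the clamp value here is illustrative, not taken from this diff):

    class TimeoutSplitSketch {
        static final long JOIN_TIMEOUT_MILLIS = 250L; // illustrative clamp

        // Split remaining nanos nt (> 0) into {ms, ns}, capped at the clamp.
        static long[] split(long nt) {
            long ms = nt / 1000000;
            int ns;
            if (ms > JOIN_TIMEOUT_MILLIS) { ms = JOIN_TIMEOUT_MILLIS; ns = 0; }
            else ns = (int) (nt % 1000000);
            return new long[] { ms, ns };
        }
        // e.g. nt = 2500000123 with a large clamp gives ms = 2500, ns = 123
    }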
1099         throws InterruptedException {
1100         while (!blocker.isReleasable()) {
1101             int wc = workerCounts;
1102  <          if ((wc & RUNNING_COUNT_MASK) != 0 &&
1103  <              UNSAFE.compareAndSwapInt(this, workerCountsOffset,
1104  <                                       wc, wc - ONE_RUNNING)) {
1102  >          if ((wc & RUNNING_COUNT_MASK) == 0)
1103  >              helpMaintainParallelism();
1104  >          else if (UNSAFE.compareAndSwapInt(this, workerCountsOffset,
1105  >                                            wc, wc - ONE_RUNNING)) {
1106                 try {
1107                     while (!blocker.isReleasable()) {
1108                         long h = eventWaiters;
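awaitBlocker services the ManagedBlocker protocol: the pool gives up a running-count slot while the caller blocks, and may add or resume a spare to preserve parallelism. A caller-side usage sketch, assuming the one-argument managedBlock entry point of this revision of the API:

    import java.util.concurrent.locks.ReentrantLock;
    import jsr166y.ForkJoinPool;
    import jsr166y.ForkJoinPool.ManagedBlocker;

    class LockBlocker implements ManagedBlocker {
        final ReentrantLock lock;
        boolean hasLock;
        LockBlocker(ReentrantLock lock) { this.lock = lock; }

        public boolean block() {            // called if isReleasable() was false
            if (!hasLock) { lock.lock(); hasLock = true; }
            return true;                    // done blocking
        }
        public boolean isReleasable() {     // avoid blocking when uncontended
            return hasLock || (hasLock = lock.tryLock());
        }
    }
    // In task code: ForkJoinPool.managedBlock(new LockBlocker(lock));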
1147         // Finish now if all threads terminated; else in some subsequent call
1148         if ((workerCounts >>> TOTAL_COUNT_SHIFT) == 0) {
1149             advanceRunLevel(TERMINATED);
1150  <          termination.arrive();
1150  >          termination.forceTermination();
1151         }
1152         return true;
1153     }
1175         if (passes > 0 && !w.isTerminated()) {
1176             w.cancelTasks();
1177             LockSupport.unpark(w);
1178  <          if (passes > 1) {
1178  >          if (passes > 1 && !w.isInterrupted()) {
1179                 try {
1180                     w.interrupt();
1181                 } catch (SecurityException ignore) {
1342     // Execution methods
1343
1344     /**
1345  <   * Common code for execute, invoke and submit
1345  >   * Submits task and creates, starts, or resumes some workers if necessary
1346      */
1347     private <T> void doSubmit(ForkJoinTask<T> task) {
1285  -      if (task == null)
1286  -          throw new NullPointerException();
1287  -      if (runState >= SHUTDOWN)
1288  -          throw new RejectedExecutionException();
1348         submissionQueue.offer(task);
1349         int c; // try to increment event count -- CAS failure OK
1350         UNSAFE.compareAndSwapInt(this, eventCountOffset, c = eventCount, c+1);
1351  <      helpMaintainParallelism(); // create, start, or resume some workers
1351  >      helpMaintainParallelism();
1352     }
1353
1354     /**
1361      * scheduled for execution
1362      */
1363     public <T> T invoke(ForkJoinTask<T> task) {
1364  <      doSubmit(task);
1365  <      return task.join();
1364  >      if (task == null)
1365  >          throw new NullPointerException();
1366  >      if (runState >= SHUTDOWN)
1367  >          throw new RejectedExecutionException();
1368  >      Thread t = Thread.currentThread();
1369  >      if ((t instanceof ForkJoinWorkerThread) &&
1370  >          ((ForkJoinWorkerThread)t).pool == this)
1371  >          return task.invoke(); // bypass submit if in same pool
1372  >      else {
1373  >          doSubmit(task);
1374  >          return task.join();
1375  >      }
1376  >  }
1377  >
1378  >  /**
1379  >   * Unless terminating, forks task if within an ongoing FJ
1380  >   * computation in the current pool, else submits as external task.
1381  >   */
1382  >  private <T> void forkOrSubmit(ForkJoinTask<T> task) {
1383  >      if (runState >= SHUTDOWN)
1384  >          throw new RejectedExecutionException();
1385  >      Thread t = Thread.currentThread();
1386  >      if ((t instanceof ForkJoinWorkerThread) &&
1387  >          ((ForkJoinWorkerThread)t).pool == this)
1388  >          task.fork();
1389  >      else
1390  >          doSubmit(task);
1391     }
1392
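From a caller's point of view the new dispatch rule means invoke() from outside the pool submits and joins, while from a worker thread of the same pool it runs the task directly, skipping the submission queue. A usage sketch (the SumTask class is mine, for illustration):

    import jsr166y.ForkJoinPool;
    import jsr166y.RecursiveTask;

    class SumTask extends RecursiveTask<Integer> {
        final int n;
        SumTask(int n) { this.n = n; }
        protected Integer compute() {   // runs on a worker: fork/join directly
            if (n <= 1) return n;
            SumTask left = new SumTask(n - 1);
            left.fork();
            return n + left.join();
        }
    }
    // External caller: pool.invoke(...) submits, then joins the result.
    //   ForkJoinPool pool = new ForkJoinPool();
    //   int sum = pool.invoke(new SumTask(10)); // 55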
1393     /**
1399      * scheduled for execution
1400      */
1401     public void execute(ForkJoinTask<?> task) {
1402  <      doSubmit(task);
1402  >      if (task == null)
1403  >          throw new NullPointerException();
1404  >      forkOrSubmit(task);
1405     }
1406
1407     // AbstractExecutorService methods
1412      * scheduled for execution
1413      */
1414     public void execute(Runnable task) {
1415  +      if (task == null)
1416  +          throw new NullPointerException();
1417         ForkJoinTask<?> job;
1418         if (task instanceof ForkJoinTask<?>) // avoid re-wrap
1419             job = (ForkJoinTask<?>) task;
1420         else
1421             job = ForkJoinTask.adapt(task, null);
1422  <      doSubmit(job);
1422  >      forkOrSubmit(job);
1423     }
1424
1425     /**
1432      * scheduled for execution
1433      */
1434     public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) {
1435  <      doSubmit(task);
1435  >      if (task == null)
1436  >          throw new NullPointerException();
1437  >      forkOrSubmit(task);
1438         return task;
1439     }
1440
1444      * scheduled for execution
1445      */
1446     public <T> ForkJoinTask<T> submit(Callable<T> task) {
1447  +      if (task == null)
1448  +          throw new NullPointerException();
1449         ForkJoinTask<T> job = ForkJoinTask.adapt(task);
1450  <      doSubmit(job);
1450  >      forkOrSubmit(job);
1451         return job;
1452     }
1453
1457      * scheduled for execution
1458      */
1459     public <T> ForkJoinTask<T> submit(Runnable task, T result) {
1460  +      if (task == null)
1461  +          throw new NullPointerException();
1462         ForkJoinTask<T> job = ForkJoinTask.adapt(task, result);
1463  <      doSubmit(job);
1463  >      forkOrSubmit(job);
1464         return job;
1465     }
1466
1470      * scheduled for execution
1471      */
1472     public ForkJoinTask<?> submit(Runnable task) {
1473  +      if (task == null)
1474  +          throw new NullPointerException();
1475         ForkJoinTask<?> job;
1476         if (task instanceof ForkJoinTask<?>) // avoid re-wrap
1477             job = (ForkJoinTask<?>) task;
1478         else
1479             job = ForkJoinTask.adapt(task, null);
1480  <      doSubmit(job);
1480  >      forkOrSubmit(job);
1481         return job;
1482     }
1483
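All the submit variants above now follow the same shape: explicit null check, adapt to a ForkJoinTask if needed, then forkOrSubmit. Caller-side usage, in the Java 6-style anonymous classes of the jsr166y era:

    import java.util.concurrent.Callable;
    import jsr166y.ForkJoinPool;
    import jsr166y.ForkJoinTask;

    class SubmitDemo {
        public static void main(String[] args) throws Exception {
            ForkJoinPool pool = new ForkJoinPool();
            ForkJoinTask<Integer> f = pool.submit(new Callable<Integer>() {
                public Integer call() { return 6 * 7; }
            });
            System.out.println(f.get());  // 42; ForkJoinTask is also a Future
            pool.submit(new Runnable() { public void run() {} }, "done");
            pool.shutdown();
        }
    }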
1537
1538     /**
1539      * Returns the number of worker threads that have started but not
1540  <   * yet terminated. This result returned by this method may differ
1540  >   * yet terminated. The result returned by this method may differ
1541      * from {@link #getParallelism} when threads are created to
1542      * maintain parallelism when others are cooperatively blocked.
1543      *
1785     }
1786
1787     /**
1788  +   * Returns true if terminating or terminated. Used by ForkJoinWorkerThread.
1789  +   */
1790  +  final boolean isAtLeastTerminating() {
1791  +      return runState >= TERMINATING;
1792  +  }
1793  +
1794  +  /**
1795      * Returns {@code true} if this pool has been shut down.
1796      *
1797      * @return {@code true} if this pool has been shut down
1814     public boolean awaitTermination(long timeout, TimeUnit unit)
1815         throws InterruptedException {
1816         try {
1817  <          return termination.awaitAdvanceInterruptibly(0, timeout, unit) > 0;
1817  >          termination.awaitAdvanceInterruptibly(0, timeout, unit);
1818         } catch (TimeoutException ex) {
1819             return false;
1820         }
1821  +      return true;
1822     }
1823
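The reworked method keeps the standard ExecutorService contract: false on timeout, true once terminated. Usage sketch:

    import java.util.concurrent.TimeUnit;
    import jsr166y.ForkJoinPool;

    class ShutdownDemo {
        public static void main(String[] args) throws InterruptedException {
            ForkJoinPool pool = new ForkJoinPool();
            pool.shutdown();                                   // no new submissions
            if (!pool.awaitTermination(10, TimeUnit.SECONDS))  // timed out?
                pool.shutdownNow();                            // cancel stragglers
        }
    }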
1824     /**