      */

  package jsr166e;
+
  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.Collection;
  ...
  import java.util.concurrent.Future;
  import java.util.concurrent.RejectedExecutionException;
  import java.util.concurrent.RunnableFuture;
+ import java.util.concurrent.ThreadLocalRandom;
  import java.util.concurrent.TimeUnit;
  import java.util.concurrent.atomic.AtomicInteger;
  import java.util.concurrent.atomic.AtomicLong;
  ...
   * ForkJoinPool}s may also be appropriate for use with event-style
   * tasks that are never joined.
   *
-  * <p>A {@code ForkJoinPool} is constructed with a given target
-  * parallelism level; by default, equal to the number of available
-  * processors. The pool attempts to maintain enough active (or
-  * available) threads by dynamically adding, suspending, or resuming
-  * internal worker threads, even if some tasks are stalled waiting to
-  * join others. However, no such adjustments are guaranteed in the
-  * face of blocked IO or other unmanaged synchronization. The nested
-  * {@link ManagedBlocker} interface enables extension of the kinds of
+  * <p>A static {@link #commonPool} is available and appropriate for
+  * most applications. The common pool is used by any ForkJoinTask that
+  * is not explicitly submitted to a specified pool. Using the common
+  * pool normally reduces resource usage (its threads are slowly
+  * reclaimed during periods of non-use, and reinstated upon subsequent
+  * use). The common pool is by default constructed with default
+  * parameters, but these may be controlled by setting any or all of
+  * the three properties {@code
+  * java.util.concurrent.ForkJoinPool.common.{parallelism,
+  * threadFactory, exceptionHandler}}.
+  *
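A minimal sketch of the programmatic route (the property names are the real
ones above; the factory class name is hypothetical). Because the static
initializer at the end of this diff reads the properties only once, they
must be set before ForkJoinPool is first touched:

    // Must run before anything triggers ForkJoinPool class initialization.
    System.setProperty(
        "java.util.concurrent.ForkJoinPool.common.parallelism", "4");
    System.setProperty(
        "java.util.concurrent.ForkJoinPool.common.threadFactory",
        "com.example.MyThreadFactory");   // hypothetical factory class
    int par = ForkJoinPool.commonPool().getParallelism(); // reports 4
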
+  * <p>For applications that require separate or custom pools, a {@code
+  * ForkJoinPool} may be constructed with a given target parallelism
+  * level; by default, equal to the number of available processors. The
+  * pool attempts to maintain enough active (or available) threads by
+  * dynamically adding, suspending, or resuming internal worker
+  * threads, even if some tasks are stalled waiting to join
+  * others. However, no such adjustments are guaranteed in the face of
+  * blocked IO or other unmanaged synchronization. The nested {@link
+  * ManagedBlocker} interface enables extension of the kinds of
   * synchronization accommodated.
   *
   * <p>In addition to execution and lifecycle control methods, this
  ...
   * </tr>
   * </table>
   *
-  * <p><b>Sample Usage.</b> Normally a single {@code ForkJoinPool} is
-  * used for all parallel task execution in a program or subsystem.
-  * Otherwise, use would not usually outweigh the construction and
-  * bookkeeping overhead of creating a large set of threads. For
-  * example, a common pool could be used for the {@code SortTasks}
-  * illustrated in {@link RecursiveAction}. Because {@code
-  * ForkJoinPool} uses threads in {@linkplain java.lang.Thread#isDaemon
-  * daemon} mode, there is typically no need to explicitly {@link
-  * #shutdown} such a pool upon program exit.
-  *
-  * <pre> {@code
-  * static final ForkJoinPool mainPool = new ForkJoinPool();
-  * ...
-  * public void sort(long[] array) {
-  *   mainPool.invoke(new SortTask(array, 0, array.length));
-  * }}</pre>
-  *
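The idiom replacing the deleted sample simply routes through the common
pool; a hedged sketch, reusing the SortTask referenced in the removed text:

    // Sketch only: SortTask is the RecursiveAction subclass from the
    // removed sample; no per-program pool field is needed anymore.
    public void sort(long[] array) {
        ForkJoinPool.commonPool().invoke(new SortTask(array, 0, array.length));
    }
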
   * <p><b>Implementation notes</b>: This implementation restricts the
   * maximum number of running threads to 32767. Attempts to create
   * pools with greater than the maximum number result in
  ...
   * when locked remains available to check consistency.
   *
   * Recording WorkQueues. WorkQueues are recorded in the
-  * "workQueues" array that is created upon pool construction and
-  * expanded if necessary. Updates to the array while recording
-  * new workers and unrecording terminated ones are protected from
-  * each other by a lock but the array is otherwise concurrently
-  * readable, and accessed directly. To simplify index-based
-  * operations, the array size is always a power of two, and all
-  * readers must tolerate null slots. Shared (submission) queues
-  * are at even indices, worker queues at odd indices. Grouping
-  * them together in this way simplifies and speeds up task
-  * scanning.
+  * "workQueues" array that is created upon first use and expanded
+  * if necessary. Updates to the array while recording new workers
+  * and unrecording terminated ones are protected from each other
+  * by a lock but the array is otherwise concurrently readable, and
+  * accessed directly. To simplify index-based operations, the
+  * array size is always a power of two, and all readers must
+  * tolerate null slots. Shared (submission) queues are at even
+  * indices, worker queues at odd indices. Grouping them together
+  * in this way simplifies and speeds up task scanning.
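A self-contained sketch of the even/odd slot convention just described; the
helper names and mask arithmetic are illustrative, not the pool's actual
fields:

    // In a power-of-two-sized table, clearing or setting the low bit of a
    // masked hash selects the kind of slot.
    static int submissionSlot(int hash, int lengthMinusOne) {
        return hash & lengthMinusOne & ~1;   // even index: submission queue
    }
    static int workerSlot(int hash, int lengthMinusOne) {
        return (hash & lengthMinusOne) | 1;  // odd index: worker queue
    }
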
   *
   * All worker thread creation is on-demand, triggered by task
   * submissions, replacement of terminated workers, and/or
  ...
   *
   * Trimming workers. To release resources after periods of lack of
   * use, a worker starting to wait when the pool is quiescent will
-  * time out and terminate if the pool has remained quiescent for
-  * SHRINK_RATE nanosecs. This will slowly propagate, eventually
-  * terminating all workers after long periods of non-use.
+  * time out and terminate if the pool has remained quiescent for a
+  * given period -- a short period if there are more threads than
+  * parallelism, longer as the number of threads decreases. This
+  * will slowly propagate, eventually terminating all workers after
+  * periods of non-use.
   *
   * Shutdown and Termination. A call to shutdownNow atomically sets
   * a runState bit and then (non-atomically) sets each worker's
  ...
      }

      /**
-      * A simple non-reentrant lock used for exclusion when managing
-      * queues and workers. We use a custom lock so that we can readily
-      * probe lock state in constructions that check among alternative
-      * actions. The lock is normally only very briefly held, and
-      * sometimes treated as a spinlock, but other usages block to
-      * reduce overall contention in those cases where locked code
-      * bodies perform allocation/resizing.
-      */
-     static final class Mutex extends AbstractQueuedSynchronizer {
-         public final boolean tryAcquire(int ignore) {
-             return compareAndSetState(0, 1);
-         }
-         public final boolean tryRelease(int ignore) {
-             setState(0);
-             return true;
-         }
-         public final void lock() { acquire(0); }
-         public final void unlock() { release(0); }
-         public final boolean isHeldExclusively() { return getState() == 1; }
-         public final Condition newCondition() { return new ConditionObject(); }
-     }
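For reference, a usage sketch of the Mutex removed here; it behaves as a
plain non-reentrant lock, and the surrounding method is illustrative of the
shape in which the pool used it before this change:

    void updateUnderLock(Mutex lock, Runnable critical) {
        lock.lock();
        try {
            critical.run();    // e.g., resize or record under exclusion
        } finally {
            lock.unlock();
        }
    }
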
-
-     /**
       * Class for artificial tasks that are used to replace the target
       * of local joins if they are removed from an interior queue slot
       * in WorkQueue.tryRemoveAndExec. We don't need the proxy to
  ...

          /**
           * Takes next task, if one exists, in LIFO order. Call only
-          * by owner in unshared queues. (We do not have a shared
-          * version of this method because it is never needed.)
+          * by owner in unshared queues.
           */
          final ForkJoinTask<?> pop() {
              ForkJoinTask<?>[] a; ForkJoinTask<?> t; int m;
  ...
              return null;
          }

+         final ForkJoinTask<?> sharedPop() {
+             ForkJoinTask<?> task = null;
+             if (runState == 0 && U.compareAndSwapInt(this, RUNSTATE, 0, 1)) {
+                 try {
+                     ForkJoinTask<?>[] a; int m;
+                     if ((a = array) != null && (m = a.length - 1) >= 0) {
+                         for (int s; (s = top - 1) - base >= 0;) {
+                             long j = ((m & s) << ASHIFT) + ABASE;
+                             ForkJoinTask<?> t =
+                                 (ForkJoinTask<?>)U.getObject(a, j);
+                             if (t == null)
+                                 break;
+                             if (U.compareAndSwapObject(a, j, t, null)) {
+                                 top = s;
+                                 task = t;
+                                 break;
+                             }
+                         }
+                     }
+                 } finally {
+                     runState = 0;
+                 }
+             }
+             return task;
+         }
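sharedPop above, like the other shared-queue methods that follow, reuses the
queue's runState field as a one-word spinlock. A minimal self-contained
sketch of the idiom, with AtomicInteger standing in for the Unsafe CAS on
runState (all names illustrative):

    import java.util.concurrent.atomic.AtomicInteger;

    class SpinGuard {
        private final AtomicInteger state = new AtomicInteger(); // 0 = free

        boolean tryWithLock(Runnable critical) {
            if (state.get() == 0 && state.compareAndSet(0, 1)) {
                try {
                    critical.run();   // e.g., CAS out a slot, adjust top
                } finally {
                    state.set(0);     // volatile write releases the lock
                }
                return true;
            }
            return false;             // contended: caller just backs off
        }
    }
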
+
+         /**
+          * Version of pop that takes the top element only if
+          * its root is the given CountedCompleter.
+          */
+         final ForkJoinTask<?> popCC(CountedCompleter<?> root) {
+             ForkJoinTask<?>[] a; int m;
+             if (root != null && (a = array) != null && (m = a.length - 1) >= 0) {
+                 for (int s; (s = top - 1) - base >= 0;) {
+                     long j = ((m & s) << ASHIFT) + ABASE;
+                     ForkJoinTask<?> t =
+                         (ForkJoinTask<?>)U.getObject(a, j);
+                     if (t == null || !(t instanceof CountedCompleter) ||
+                         ((CountedCompleter<?>)t).getRoot() != root)
+                         break;
+                     if (U.compareAndSwapObject(a, j, t, null)) {
+                         top = s;
+                         return t;
+                     }
+                     if (root.status < 0)
+                         break;
+                 }
+             }
+             return null;
+         }
+
+         /**
+          * Shared version of popCC.
+          */
+         final ForkJoinTask<?> sharedPopCC(CountedCompleter<?> root) {
+             ForkJoinTask<?> task = null;
+             if (root != null &&
+                 runState == 0 && U.compareAndSwapInt(this, RUNSTATE, 0, 1)) {
+                 try {
+                     ForkJoinTask<?>[] a; int m;
+                     if ((a = array) != null && (m = a.length - 1) >= 0) {
+                         for (int s; (s = top - 1) - base >= 0;) {
+                             long j = ((m & s) << ASHIFT) + ABASE;
+                             ForkJoinTask<?> t =
+                                 (ForkJoinTask<?>)U.getObject(a, j);
+                             if (t == null || !(t instanceof CountedCompleter) ||
+                                 ((CountedCompleter<?>)t).getRoot() != root)
+                                 break;
+                             if (U.compareAndSwapObject(a, j, t, null)) {
+                                 top = s;
+                                 task = t;
+                                 break;
+                             }
+                             if (root.status < 0)
+                                 break;
+                         }
+                     }
+                 } finally {
+                     runState = 0;
+                 }
+             }
+             return task;
+         }
+
          /**
           * Takes a task in FIFO order if b is base of queue and a task
           * can be claimed without contention. Specialized versions
  ...
          }

          /**
+          * Version of tryUnpush for shared queues; called by non-FJ
+          * submitters after prechecking that task probably exists.
+          */
+         final boolean trySharedUnpush(ForkJoinTask<?> t) {
+             boolean success = false;
+             if (runState == 0 && U.compareAndSwapInt(this, RUNSTATE, 0, 1)) {
+                 try {
+                     ForkJoinTask<?>[] a; int s;
+                     if ((a = array) != null && (s = top) != base &&
+                         U.compareAndSwapObject
+                         (a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) {
+                         top = s;
+                         success = true;
+                     }
+                 } finally {
+                     runState = 0;  // unlock
+                 }
+             }
+             return success;
+         }
+
+         /**
           * Polls the given task only if it is at the current base.
           */
          final boolean pollFor(ForkJoinTask<?> task) {
  ...
              ASHIFT = 31 - Integer.numberOfLeadingZeros(s);
          }
      }
+
      /**
       * Per-thread records for threads that submit to pools. Currently
       * holds only pseudo-random seed / index that is used to choose
  ...
      public static final ForkJoinWorkerThreadFactory
          defaultForkJoinWorkerThreadFactory;

+
+     /** Property prefix for constructing common pool */
+     private static final String propPrefix =
+         "java.util.concurrent.ForkJoinPool.common.";
+
+     /**
+      * Common (static) pool. Non-null for public use unless a static
+      * construction exception occurs, but internal usages must
+      * null-check on use.
+      */
+     static final ForkJoinPool commonPool;
+
+     /**
+      * Common pool parallelism. Must equal commonPool.parallelism.
+      */
+     static final int commonPoolParallelism;
+
      /**
       * Generator for assigning sequence numbers as pool names.
       */
  ...
      private static final RuntimePermission modifyThreadPermission;

      /**
-      * Per-thread submission bookeeping. Shared across all pools
+      * Per-thread submission bookkeeping. Shared across all pools
       * to reduce ThreadLocal pollution and because random motion
       * to avoid contention in one pool is likely to hold for others.
       */
  ...
      // static constants

      /**
-      * The wakeup interval (in nanoseconds) for a worker waiting for a
-      * task when the pool is quiescent to instead try to shrink the
-      * number of workers. The exact value does not matter too
-      * much. It must be short enough to release resources during
-      * sustained periods of idleness, but not so short that threads
-      * are continually re-created.
+      * Initial timeout value (in nanoseconds) for the thread triggering
+      * quiescence to park waiting for new work. On timeout, the thread
+      * will instead try to shrink the number of workers.
       */
-     private static final long SHRINK_RATE =
-         4L * 1000L * 1000L * 1000L; // 4 seconds
+     private static final long IDLE_TIMEOUT = 1000L * 1000L * 1000L; // 1sec

      /**
-      * The timeout value for attempted shrinkage, includes
-      * some slop to cope with system timer imprecision.
+      * Timeout value when there are more threads than the parallelism level.
       */
-     private static final long SHRINK_TIMEOUT = SHRINK_RATE - (SHRINK_RATE / 10);
+     private static final long FAST_IDLE_TIMEOUT = 100L * 1000L * 1000L;
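The two constants combine in idleAwaitWork further below, where dc works out
to the parallelism minus the current total thread count. A worked example of
the resulting policy, assuming parallelism 8:

    // Illustrative arithmetic only (mirrors the expression in idleAwaitWork):
    //   10 live threads -> dc = -2 -> park ~0.1s  (FAST_IDLE_TIMEOUT)
    //    8 live threads -> dc =  0 -> park ~1s    ((0 + 1) * IDLE_TIMEOUT)
    //    5 live threads -> dc =  3 -> park ~4s    ((3 + 1) * IDLE_TIMEOUT)
    static long parkTime(int dc) {
        return dc < 0 ? FAST_IDLE_TIMEOUT : (dc + 1) * IDLE_TIMEOUT;
    }
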

      /**
       * The maximum stolen->joining link depth allowed in method
  ...
       * empirically works OK on current JVMs.
       */

+     volatile long stealCount;                  // collects worker counts
      volatile long ctl;                         // main pool control
      final int parallelism;                     // parallelism level
      final int localMode;                       // per-worker scheduling mode
+     volatile int nextWorkerNumber;             // to create worker name string
      final int submitMask;                      // submit queue index bound
      int nextSeed;                              // for initializing worker seeds
+     volatile int mainLock;                     // spinlock for array updates
      volatile int runState;                     // shutdown status and seq
      WorkQueue[] workQueues;                    // main registry
-     final Mutex lock;                          // for registration
-     final Condition termination;               // for awaitTermination
      final ForkJoinWorkerThreadFactory factory; // factory for new workers
      final Thread.UncaughtExceptionHandler ueh; // per-worker UEH
-     final AtomicLong stealCount;               // collect counts when terminated
-     final AtomicInteger nextWorkerNumber;      // to create worker name string
      final String workerNamePrefix;             // to create worker name string

+     /*
+      * Mechanics for main lock protecting worker array updates. Uses
+      * the same strategy as ConcurrentHashMap bins -- a spinLock for
+      * normal cases, but falling back to builtin lock when (rarely)
+      * needed. See internal ConcurrentHashMap documentation for
+      * explanation.
+      */
+
+     static final int LOCK_WAITING = 2;  // bit to indicate need for signal
+     static final int MAX_LOCK_SPINS = 1 << 8;
+
+     private void tryAwaitMainLock() {
+         int spins = MAX_LOCK_SPINS, r = 0, h;
+         while (((h = mainLock) & 1) != 0) {
+             if (r == 0)
+                 r = ThreadLocalRandom.current().nextInt(); // randomize spins
+             else if (spins >= 0) {
+                 r ^= r << 1; r ^= r >>> 3; r ^= r << 10;   // xorshift
+                 if (r >= 0)
+                     --spins;
+             }
+             else if (U.compareAndSwapInt(this, MAINLOCK, h, h | LOCK_WAITING)) {
+                 synchronized (this) {
+                     if ((mainLock & LOCK_WAITING) != 0) {
+                         try {
+                             wait();
+                         } catch (InterruptedException ie) {
+                             Thread.currentThread().interrupt();
+                         }
+                     }
+                     else
+                         notifyAll(); // possibly won race vs signaller
+                 }
+                 break;
+             }
+         }
+     }
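A self-contained sketch of the spin-then-block idiom tryAwaitMainLock
implements, recast with AtomicInteger in place of the Unsafe-based mainLock
word (names illustrative; the real method additionally randomizes its spin
count with xorshift):

    import java.util.concurrent.atomic.AtomicInteger;

    class SpinThenBlockLock {
        static final int LOCKED = 1, WAITING = 2;
        private final AtomicInteger state = new AtomicInteger();

        void lock() {
            while (!state.compareAndSet(0, LOCKED))
                awaitLock();
        }

        private void awaitLock() {
            int spins = 1 << 8, h;
            while (((h = state.get()) & LOCKED) != 0) {
                if (spins > 0)
                    --spins;                          // phase 1: spin briefly
                else if (state.compareAndSet(h, h | WAITING)) {
                    synchronized (this) {             // phase 2: block
                        if ((state.get() & WAITING) != 0) {
                            try { wait(); }
                            catch (InterruptedException ie) {
                                Thread.currentThread().interrupt();
                            }
                        } else
                            notifyAll();              // won race vs unlocker
                    }
                    break;
                }
            }
        }

        void unlock() {
            if (!state.compareAndSet(LOCKED, 0)) {    // a waiter set WAITING
                state.set(0);
                synchronized (this) { notifyAll(); }
            }
        }
    }
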
+
      // Creating, registering, and deregistering workers

      /**
  ...
       * ForkJoinWorkerThread.
       */
      final String nextWorkerName() {
-         return workerNamePrefix.concat
-             (Integer.toString(nextWorkerNumber.addAndGet(1)));
+         int n;
+         do {} while (!U.compareAndSwapInt(this, NEXTWORKERNUMBER,
+                                           n = nextWorkerNumber, ++n));
+         return workerNamePrefix.concat(Integer.toString(n));
      }

      /**
  ...
       *
       * @param w the worker's queue
       */
-
      final void registerWorker(WorkQueue w) {
-         Mutex lock = this.lock;
-         lock.lock();
+         while (!U.compareAndSwapInt(this, MAINLOCK, 0, 1))
+             tryAwaitMainLock();
          try {
-             WorkQueue[] ws = workQueues;
-             if (w != null && ws != null) {          // skip on shutdown/failure
+             WorkQueue[] ws;
+             if ((ws = workQueues) == null)
+                 ws = workQueues = new WorkQueue[submitMask + 1];
+             if (w != null) {
                  int rs, n = ws.length, m = n - 1;
                  int s = nextSeed += SEED_INCREMENT; // rarely-colliding sequence
                  w.seed = (s == 0) ? 1 : s;          // ensure non-zero seed
  ...
                  runState = ((rs = runState) & SHUTDOWN) | ((rs + 2) & ~SHUTDOWN);
              }
          } finally {
-             lock.unlock();
+             if (!U.compareAndSwapInt(this, MAINLOCK, 1, 0)) {
+                 mainLock = 0;
+                 synchronized (this) { notifyAll(); };
+             }
          }
+
      }

      /**
  ...
       * @param ex the exception causing failure, or null if none
       */
      final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) {
-         Mutex lock = this.lock;
          WorkQueue w = null;
          if (wt != null && (w = wt.workQueue) != null) {
              w.runState = -1;                // ensure runState is set
-             stealCount.getAndAdd(w.totalSteals + w.nsteals);
+             long steals = w.totalSteals + w.nsteals, sc;
+             do {} while (!U.compareAndSwapLong(this, STEALCOUNT,
+                                                sc = stealCount, sc + steals));
              int idx = w.poolIndex;
-             lock.lock();
-             try {                           // remove record from array
+             while (!U.compareAndSwapInt(this, MAINLOCK, 0, 1))
+                 tryAwaitMainLock();
+             try {
                  WorkQueue[] ws = workQueues;
                  if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w)
                      ws[idx] = null;
              } finally {
-                 lock.unlock();
+                 if (!U.compareAndSwapInt(this, MAINLOCK, 1, 0)) {
+                     mainLock = 0;
+                     synchronized (this) { notifyAll(); };
+                 }
              }
          }
  ...
              U.throwException(ex);
      }

-
      // Submissions

      /**
  ...
          for (int r = s.seed, m = submitMask;;) {
              WorkQueue[] ws; WorkQueue q;
              int k = r & m & SQMASK;          // use only even indices
-             if (runState < 0 || (ws = workQueues) == null || ws.length <= k)
+             if (runState < 0)
                  throw new RejectedExecutionException(); // shutting down
+             else if ((ws = workQueues) == null || ws.length <= k) {
+                 while (!U.compareAndSwapInt(this, MAINLOCK, 0, 1))
+                     tryAwaitMainLock();
+                 try {
+                     if (workQueues == null)
+                         workQueues = new WorkQueue[submitMask + 1];
+                 } finally {
+                     if (!U.compareAndSwapInt(this, MAINLOCK, 1, 0)) {
+                         mainLock = 0;
+                         synchronized (this) { notifyAll(); };
+                     }
+                 }
+             }
              else if ((q = ws[k]) == null) {  // create new queue
                  WorkQueue nq = new WorkQueue(this, null, SHARED_QUEUE);
-                 Mutex lock = this.lock;      // construct outside lock
-                 lock.lock();
-                 try {                        // recheck under lock
+                 while (!U.compareAndSwapInt(this, MAINLOCK, 0, 1))
+                     tryAwaitMainLock();
+                 try {
                      int rs = runState;       // to update seq
                      if (ws == workQueues && ws[k] == null) {
                          ws[k] = nq;
                          runState = ((rs & SHUTDOWN) | ((rs + 2) & ~SHUTDOWN));
                      }
                  } finally {
-                     lock.unlock();
+                     if (!U.compareAndSwapInt(this, MAINLOCK, 1, 0)) {
+                         mainLock = 0;
+                         synchronized (this) { notifyAll(); };
+                     }
                  }
              }
              else if (q.trySharedPush(task)) {
  ...
          }
      }

+     /**
+      * Submits the given (non-null) task to the common pool, if possible.
+      */
+     static void submitToCommonPool(ForkJoinTask<?> task) {
+         ForkJoinPool p;
+         if ((p = commonPool) == null)
+             throw new RejectedExecutionException("Common Pool Unavailable");
+         p.doSubmit(task);
+     }
+
+     /**
+      * Returns true if the caller is (or may be) a submitter to the
+      * common pool, not all workers are active, and there appear to be
+      * tasks in the associated submission queue.
+      */
+     static boolean canHelpCommonPool() {
+         ForkJoinPool p; WorkQueue[] ws; WorkQueue q;
+         int k = submitters.get().seed & SQMASK;
+         return ((p = commonPool) != null &&
+                 (int)(p.ctl >> AC_SHIFT) < 0 &&
+                 (ws = p.workQueues) != null &&
+                 ws.length > (k &= p.submitMask) &&
+                 (q = ws[k]) != null &&
+                 q.top - q.base > 0);
+     }
+
+     /**
+      * Returns true if the given task was submitted to the common pool,
+      * has not yet commenced execution, and is available for removal
+      * according to execution policies; if so, removes the submission
+      * from the pool.
+      *
+      * @param task the task
+      * @return true if successful
+      */
+     static boolean tryUnsubmitFromCommonPool(ForkJoinTask<?> task) {
+         // Peek, looking for task and eligibility before
+         // using trySharedUnpush to actually take it under lock
+         ForkJoinPool p; WorkQueue[] ws; WorkQueue q;
+         ForkJoinTask<?>[] a; int s;
+         int k = submitters.get().seed & SQMASK;
+         return ((p = commonPool) != null &&
+                 (int)(p.ctl >> AC_SHIFT) < 0 &&
+                 (ws = p.workQueues) != null &&
+                 ws.length > (k &= p.submitMask) &&
+                 (q = ws[k]) != null &&
+                 (a = q.array) != null &&
+                 (s = q.top - 1) - q.base >= 0 &&
+                 s >= 0 && s < a.length &&
+                 a[s] == task &&
+                 q.trySharedUnpush(task));
+     }
+
+     /**
+      * Tries to pop a task from the common pool with the given root.
+      */
+     static ForkJoinTask<?> popCCFromCommonPool(CountedCompleter<?> root) {
+         ForkJoinPool p; WorkQueue[] ws; WorkQueue q;
+         ForkJoinTask<?> t;
+         int k = submitters.get().seed & SQMASK;
+         if (root != null &&
+             (p = commonPool) != null &&
+             (int)(p.ctl >> AC_SHIFT) < 0 &&
+             (ws = p.workQueues) != null &&
+             ws.length > (k &= p.submitMask) &&
+             (q = ws[k]) != null && q.top - q.base > 0 &&
+             root.status >= 0 &&
+             (t = q.sharedPopCC(root)) != null)
+             return t;
+         return null;
+     }
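These static hooks back the external (non-worker) half of the ForkJoinTask
API against the common pool; a hedged sketch of the caller's view, assuming
the fork()/tryUnfork() routing that the surrounding comments describe:

    // From a plain thread, not a ForkJoinWorkerThread:
    ForkJoinTask<Integer> t = new RecursiveTask<Integer>() {
        protected Integer compute() { return 6 * 7; }
    };
    t.fork();              // lands in a common-pool submission queue
    if (t.tryUnfork())     // tryUnsubmitFromCommonPool path, if eligible
        t.invoke();        // removed before execution: run it directly
    else
        t.join();          // already taken by a worker: just wait
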
+
+
      // Maintaining ctl counts

      /**
  ...
      }

      /**
-      * Tries to activate or create a worker if too few are active.
+      * Tries to create one worker, or activate one or more, if too
+      * few are active.
       */
      final void signalWork() {
          long c; int u;
  ...
       * awaiting signal,
       *
       * @param w the worker (via its WorkQueue)
-      * @return a task or null of none found
+      * @return a task or null if none found
       */
      private final ForkJoinTask<?> scan(WorkQueue w) {
          WorkQueue[] ws;                       // first update random seed
  ...
                  t = (ForkJoinTask<?>)U.getObjectVolatile(a, i);
                  if (q.base == b && ec >= 0 && t != null &&
                      U.compareAndSwapObject(a, i, t, null)) {
-                     if (q.top - (q.base = b + 1) > 1)
+                     if (q.top - (q.base = b + 1) > 0)
                          signalWork();         // help pushes signal
                      return t;
                  }
  ...
              }
          }
          else if (w.eventCount < 0) {          // already queued
-             if ((nr = w.rescans) > 0) {       // continue rescanning
-                 int ac = a + parallelism;
-                 if (((w.rescans = (ac < nr) ? ac : nr - 1) & 3) == 0)
-                     Thread.yield();           // yield before block
-             }
-             else {
+             int ac = a + parallelism;
+             if ((nr = w.rescans) > 0)         // continue rescanning
+                 w.rescans = (ac < nr) ? ac : nr - 1;
+             else if (((w.seed >>> 16) & ac) == 0) { // randomize park
                  Thread.interrupted();         // clear status
                  Thread wt = Thread.currentThread();
                  U.putObject(wt, PARKBLOCKER, this);
  ...
      /**
       * If inactivating worker w has caused the pool to become
       * quiescent, checks for pool termination, and, so long as this is
-      * not the only worker, waits for event for up to SHRINK_RATE
-      * nanosecs. On timeout, if ctl has not changed, terminates the
+      * not the only worker, waits for an event for up to a given
+      * duration. On timeout, if ctl has not changed, terminates the
       * worker, which will in turn wake up another worker to possibly
       * repeat this process.
       *
  ...
      private void idleAwaitWork(WorkQueue w, long currentCtl, long prevCtl) {
          if (w.eventCount < 0 && !tryTerminate(false, false) &&
              (int)prevCtl != 0 && !hasQueuedSubmissions() && ctl == currentCtl) {
+             int dc = -(short)(currentCtl >>> TC_SHIFT);
+             long parkTime = dc < 0 ? FAST_IDLE_TIMEOUT : (dc + 1) * IDLE_TIMEOUT;
+             long deadline = System.nanoTime() + parkTime - 100000L; // ~0.1ms slop
              Thread wt = Thread.currentThread();
-             Thread.yield();                   // yield before block
              while (ctl == currentCtl) {
-                 long startTime = System.nanoTime();
                  Thread.interrupted();         // timed variant of version in scan()
                  U.putObject(wt, PARKBLOCKER, this);
                  w.parker = wt;
                  if (ctl == currentCtl)
-                     U.park(false, SHRINK_RATE);
+                     U.park(false, parkTime);
                  w.parker = null;
                  U.putObject(wt, PARKBLOCKER, null);
                  if (ctl != currentCtl)
                      break;
-                 if (System.nanoTime() - startTime >= SHRINK_TIMEOUT &&
+                 if (deadline - System.nanoTime() <= 0L &&
                      U.compareAndSwapLong(this, CTL, currentCtl, prevCtl)) {
                      w.eventCount = (w.eventCount + E_SEQ) | E_MASK;
                      w.runState = -1;          // shrink
  ...
          }
      }

-
      /**
       * Runs tasks until {@code isQuiescent()}. We piggyback on
       * active count ctl maintenance, but rather than blocking
  ...
      }

      /**
+      * Restricted version of helpQuiescePool for non-FJ callers.
+      */
+     static void externalHelpQuiescePool() {
+         ForkJoinPool p; WorkQueue[] ws; WorkQueue q, sq;
+         ForkJoinTask<?>[] a; int b;
+         ForkJoinTask<?> t = null;
+         int k = submitters.get().seed & SQMASK;
+         if ((p = commonPool) != null &&
+             (int)(p.ctl >> AC_SHIFT) < 0 &&
+             (ws = p.workQueues) != null &&
+             ws.length > (k &= p.submitMask) &&
+             (q = ws[k]) != null) {
+             while (q.top - q.base > 0) {
+                 if ((t = q.sharedPop()) != null)
+                     break;
+             }
+             if (t == null && (sq = p.findNonEmptyStealQueue(q)) != null &&
+                 (b = sq.base) - sq.top < 0)
+                 t = sq.pollAt(b);
+             if (t != null)
+                 t.doExec();
+         }
+     }
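A hedged note on the calling side: ForkJoinTask.helpQuiesce(), invoked from
a plain thread, can be served by this path, so a submitter spends its wait
on useful work instead of idling (routing assumed from the surrounding
context):

    // From any thread: help run tasks until the pool is quiescent.
    ForkJoinTask.helpQuiesce();   // non-worker callers reach
                                  // externalHelpQuiescePool()
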
+
+     /**
       * Gets and removes a local or stolen task for the given worker.
       *
       * @return a task, if available
  ...
              8);
      }

+     /**
+      * Returns the approximate submission queue length for the given caller.
+      */
+     static int getEstimatedSubmitterQueueLength() {
+         ForkJoinPool p; WorkQueue[] ws; WorkQueue q;
+         int k = submitters.get().seed & SQMASK;
+         return ((p = commonPool) != null &&
+                 p.runState >= 0 &&
+                 (ws = p.workQueues) != null &&
+                 ws.length > (k &= p.submitMask) &&
+                 (q = ws[k]) != null) ?
+             q.queueSize() : 0;
+     }
+
      // Termination

      /**
  ...
       * @return true if now terminating or terminated
       */
      private boolean tryTerminate(boolean now, boolean enable) {
-         Mutex lock = this.lock;
          for (long c;;) {
              if (((c = ctl) & STOP_BIT) != 0) {  // already terminating
                  if ((short)(c >>> TC_SHIFT) == -parallelism) {
-                     lock.lock();                // don't need try/finally
-                     termination.signalAll();    // signal when 0 workers
-                     lock.unlock();
+                     synchronized (this) {
+                         notifyAll();            // signal when 0 workers
+                     }
                  }
                  return true;
              }
              if (runState >= 0) {                // not yet enabled
                  if (!enable)
                      return false;
-                 lock.lock();
-                 runState |= SHUTDOWN;
-                 lock.unlock();
+                 while (!U.compareAndSwapInt(this, MAINLOCK, 0, 1))
+                     tryAwaitMainLock();
+                 try {
+                     runState |= SHUTDOWN;
+                 } finally {
+                     if (!U.compareAndSwapInt(this, MAINLOCK, 1, 0)) {
+                         mainLock = 0;
+                         synchronized (this) { notifyAll(); };
+                     }
+                 }
              }
              if (!now) {                         // check if idle & no tasks
                  if ((int)(c >> AC_SHIFT) != -parallelism ||
  ...
          // Use nearest power 2 for workQueues size. See Hackers Delight sec 3.2.
          int n = parallelism - 1;
          n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
-         int size = (n + 1) << 1;        // #slots = 2*#workers
-         this.submitMask = size - 1;     // room for max # of submit queues
-         this.workQueues = new WorkQueue[size];
-         this.termination = (this.lock = new Mutex()).newCondition();
-         this.stealCount = new AtomicLong();
-         this.nextWorkerNumber = new AtomicInteger();
+         this.submitMask = ((n + 1) << 1) - 1;
          int pn = poolNumberGenerator.incrementAndGet();
          StringBuilder sb = new StringBuilder("ForkJoinPool-");
          sb.append(Integer.toString(pn));
          sb.append("-worker-");
          this.workerNamePrefix = sb.toString();
-         lock.lock();
          this.runState = 1;              // set init flag
-         lock.unlock();
+     }
+
+     /**
+      * Constructor for the common pool, suitable only for static
+      * initialization. Basically the same as above, but uses the
+      * smallest possible initial footprint.
+      */
+     ForkJoinPool(int parallelism, int submitMask,
+                  ForkJoinWorkerThreadFactory factory,
+                  Thread.UncaughtExceptionHandler handler) {
+         this.factory = factory;
+         this.ueh = handler;
+         this.submitMask = submitMask;
+         this.parallelism = parallelism;
+         long np = (long)(-parallelism);
+         this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
+         this.localMode = LIFO_QUEUE;
+         this.workerNamePrefix = "ForkJoinPool.commonPool-worker-";
+         this.runState = 1;
+     }
+
+     /**
+      * Returns the common pool instance.
+      *
+      * @return the common pool instance
+      */
+     public static ForkJoinPool commonPool() {
+         ForkJoinPool p;
+         if ((p = commonPool) == null)
+             throw new Error("Common Pool Unavailable");
+         return p;
      }

      // Execution methods
  ...
      }

      /**
+      * Returns the targeted parallelism level of the common pool.
+      *
+      * @return the targeted parallelism level of the common pool
+      */
+     public static int getCommonPoolParallelism() {
+         return commonPoolParallelism;
+     }
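A small usage sketch: the static parallelism value is convenient for sizing
split thresholds without touching pool internals (the heuristic shown is
illustrative only):

    // E.g., aim for a few tasks per worker when partitioning an array:
    int parts = 4 * ForkJoinPool.getCommonPoolParallelism();
    int chunk = Math.max(1, data.length / parts);  // data: caller's array
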
+
+     /**
       * Returns the number of worker threads that have started but not
       * yet terminated. The result returned by this method may differ
       * from {@link #getParallelism} when threads are created to
  ...
       * @return the number of steals
       */
      public long getStealCount() {
-         long count = stealCount.get();
+         long count = stealCount;
          WorkQueue[] ws; WorkQueue w;
          if ((ws = workQueues) != null) {
              for (int i = 1; i < ws.length; i += 2) {
  ...
      public String toString() {
          // Use a single pass through workQueues to collect counts
          long qt = 0L, qs = 0L; int rc = 0;
-         long st = stealCount.get();
+         long st = stealCount;
          long c = ctl;
          WorkQueue[] ws; WorkQueue w;
          if ((ws = workQueues) != null) {
  ...
      }

      /**
-      * Initiates an orderly shutdown in which previously submitted
-      * tasks are executed, but no new tasks will be accepted.
-      * Invocation has no additional effect if already shut down.
-      * Tasks that are in the process of being submitted concurrently
-      * during the course of this method may or may not be rejected.
+      * Possibly initiates an orderly shutdown in which previously
+      * submitted tasks are executed, but no new tasks will be
+      * accepted. Invocation has no effect on execution state if this
+      * is the {@link #commonPool}, and no additional effect if
+      * already shut down. Tasks that are in the process of being
+      * submitted concurrently during the course of this method may or
+      * may not be rejected.
       *
       * @throws SecurityException if a security manager exists and
       *         the caller is not permitted to modify threads
  ...
       */
      public void shutdown() {
          checkPermission();
-         tryTerminate(false, true);
+         if (this != commonPool)
+             tryTerminate(false, true);
      }

      /**
-      * Attempts to cancel and/or stop all tasks, and reject all
-      * subsequently submitted tasks. Tasks that are in the process of
-      * being submitted or executed concurrently during the course of
-      * this method may or may not be rejected. This method cancels
-      * both existing and unexecuted tasks, in order to permit
-      * termination in the presence of task dependencies. So the method
-      * always returns an empty list (unlike the case for some other
-      * Executors).
+      * Possibly attempts to cancel and/or stop all tasks, and reject
+      * all subsequently submitted tasks. Invocation has no effect on
+      * execution state if this is the {@link #commonPool}, and no
+      * additional effect if already shut down. Otherwise, tasks that
+      * are in the process of being submitted or executed concurrently
+      * during the course of this method may or may not be
+      * rejected. This method cancels both existing and unexecuted
+      * tasks, in order to permit termination in the presence of task
+      * dependencies. So the method always returns an empty list
+      * (unlike the case for some other Executors).
       *
       * @return an empty list
       * @throws SecurityException if a security manager exists and
  ...
       */
      public List<Runnable> shutdownNow() {
          checkPermission();
-         tryTerminate(true, true);
+         if (this != commonPool)
+             tryTerminate(true, true);
          return Collections.emptyList();
      }

  ...
      public boolean awaitTermination(long timeout, TimeUnit unit)
          throws InterruptedException {
          long nanos = unit.toNanos(timeout);
-         final Mutex lock = this.lock;
-         lock.lock();
-         try {
-             for (;;) {
-                 if (isTerminated())
-                     return true;
-                 if (nanos <= 0)
-                     return false;
-                 nanos = termination.awaitNanos(nanos);
+         if (isTerminated())
+             return true;
+         long startTime = System.nanoTime();
+         boolean terminated = false;
+         synchronized (this) {
+             for (long waitTime = nanos, millis = 0L;;) {
+                 if ((terminated = isTerminated()) ||
+                     waitTime <= 0L ||
+                     (millis = unit.toMillis(waitTime)) <= 0L)
+                     break;
+                 wait(millis);
+                 waitTime = nanos - (System.nanoTime() - startTime);
              }
-         } finally {
-             lock.unlock();
          }
+         return terminated;
      }
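Typical usage of this method is unchanged by the rewrite; a brief sketch
(a private pool, since shutdown is a no-op on the common pool):

    ForkJoinPool pool = new ForkJoinPool(4);
    // ... submit and run work ...
    pool.shutdown();                                   // stop new submissions
    if (!pool.awaitTermination(10, TimeUnit.SECONDS))  // bounded wait(millis)
        pool.shutdownNow();                            // cancel stragglers
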

      /**
  ...
      private static final long PARKBLOCKER;
      private static final int ABASE;
      private static final int ASHIFT;
+     private static final long NEXTWORKERNUMBER;
+     private static final long STEALCOUNT;
+     private static final long MAINLOCK;

      static {
          poolNumberGenerator = new AtomicInteger();
  ...
              Class<?> ak = ForkJoinTask[].class;
              CTL = U.objectFieldOffset
                  (k.getDeclaredField("ctl"));
+             NEXTWORKERNUMBER = U.objectFieldOffset
+                 (k.getDeclaredField("nextWorkerNumber"));
+             STEALCOUNT = U.objectFieldOffset
+                 (k.getDeclaredField("stealCount"));
+             MAINLOCK = U.objectFieldOffset
+                 (k.getDeclaredField("mainLock"));
              Class<?> tk = Thread.class;
              PARKBLOCKER = U.objectFieldOffset
                  (tk.getDeclaredField("parkBlocker"));
              ABASE = U.arrayBaseOffset(ak);
              s = U.arrayIndexScale(ak);
+             ASHIFT = 31 - Integer.numberOfLeadingZeros(s);
          } catch (Exception e) {
              throw new Error(e);
          }
          if ((s & (s-1)) != 0)
              throw new Error("data type scale not a power of two");
-         ASHIFT = 31 - Integer.numberOfLeadingZeros(s);
+         try {                           // Establish common pool
+             String pp = System.getProperty(propPrefix + "parallelism");
+             String fp = System.getProperty(propPrefix + "threadFactory");
+             String up = System.getProperty(propPrefix + "exceptionHandler");
+             ForkJoinWorkerThreadFactory fac = (fp == null) ?
+                 defaultForkJoinWorkerThreadFactory :
+                 ((ForkJoinWorkerThreadFactory)ClassLoader.
+                  getSystemClassLoader().loadClass(fp).newInstance());
+             Thread.UncaughtExceptionHandler ueh = (up == null) ? null :
+                 ((Thread.UncaughtExceptionHandler)ClassLoader.
+                  getSystemClassLoader().loadClass(up).newInstance());
+             int par;
+             if ((pp == null || (par = Integer.parseInt(pp)) <= 0))
+                 par = Runtime.getRuntime().availableProcessors();
+             if (par > MAX_CAP)
+                 par = MAX_CAP;
+             commonPoolParallelism = par;
+             int n = par - 1;            // precompute submit mask
+             n |= n >>> 1; n |= n >>> 2; n |= n >>> 4;
+             n |= n >>> 8; n |= n >>> 16;
+             int mask = ((n + 1) << 1) - 1;
+             commonPool = new ForkJoinPool(par, mask, fac, ueh);
+         } catch (Exception e) {
+             throw new Error(e);
+         }
      }
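Because the properties are consulted exactly once, in this initializer, the
usual way to configure the common pool is on the JVM command line; the
factory class name below is hypothetical:

    java -Djava.util.concurrent.ForkJoinPool.common.parallelism=4 \
         -Djava.util.concurrent.ForkJoinPool.common.threadFactory=com.example.MyThreadFactory \
         MyApp
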

      /**