5 |
|
*/ |
6 |
|
|
7 |
|
package jsr166y; |
8 |
+ |
|
9 |
|
import java.util.ArrayList; |
10 |
|
import java.util.Arrays; |
11 |
|
import java.util.Collection; |
42 |
|
* ForkJoinPool}s may also be appropriate for use with event-style |
43 |
|
* tasks that are never joined. |
44 |
|
* |
45 |
< |
* <p>A {@code ForkJoinPool} is constructed with a given target |
46 |
< |
* parallelism level; by default, equal to the number of available |
47 |
< |
* processors. The pool attempts to maintain enough active (or |
48 |
< |
* available) threads by dynamically adding, suspending, or resuming |
49 |
< |
* internal worker threads, even if some tasks are stalled waiting to |
50 |
< |
* join others. However, no such adjustments are guaranteed in the |
51 |
< |
* face of blocked IO or other unmanaged synchronization. The nested |
52 |
< |
* {@link ManagedBlocker} interface enables extension of the kinds of |
45 |
> |
* <p>A static {@link #commonPool} is available and appropriate for |
46 |
> |
* most applications. The common pool is used by any ForkJoinTask that |
47 |
> |
* is not explicitly submitted to a specified pool. Using the common |
48 |
> |
* pool normally reduces resource usage (its threads are slowly |
49 |
> |
* reclaimed during periods of non-use, and reinstated upon subsequent |
50 |
> |
* use). The common pool is by default constructed with default |
51 |
> |
* parameters, but these may be controlled by setting any or all of |
52 |
> |
* the three properties {@code |
53 |
> |
* java.util.concurrent.ForkJoinPool.common.{parallelism, |
54 |
> |
* threadFactory, exceptionHandler}}. |
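A minimal sketch of configuring and using the common pool, assuming the jsr166y preview API in this diff (CommonPoolDemo is a hypothetical class; the property must be set before the pool's static initializer runs):

import jsr166y.ForkJoinPool;
import jsr166y.RecursiveTask;

public class CommonPoolDemo {
    public static void main(String[] args) {
        // Read once, during ForkJoinPool's static initialization.
        System.setProperty(
            "java.util.concurrent.ForkJoinPool.common.parallelism", "4");
        ForkJoinPool pool = ForkJoinPool.commonPool();
        RecursiveTask<Integer> task = new RecursiveTask<Integer>() {
            protected Integer compute() { return 21 + 21; }
        };
        System.out.println(pool.invoke(task)); // prints 42
    }
}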
55 |
> |
* |
56 |
> |
* <p>For applications that require separate or custom pools, a {@code |
57 |
> |
* ForkJoinPool} may be constructed with a given target parallelism |
58 |
> |
* level; by default, equal to the number of available processors. The |
59 |
> |
* pool attempts to maintain enough active (or available) threads by |
60 |
> |
* dynamically adding, suspending, or resuming internal worker |
61 |
> |
* threads, even if some tasks are stalled waiting to join |
62 |
> |
* others. However, no such adjustments are guaranteed in the face of |
63 |
> |
* blocked IO or other unmanaged synchronization. The nested {@link |
64 |
> |
* ManagedBlocker} interface enables extension of the kinds of |
65 |
|
* synchronization accommodated. |
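As an illustration of the ManagedBlocker extension point, a sketch along the lines of the example in the interface's own documentation (QueueTaker is a hypothetical name):

import java.util.concurrent.BlockingQueue;

class QueueTaker<E> implements ForkJoinPool.ManagedBlocker {
    final BlockingQueue<E> queue;
    volatile E item = null;
    QueueTaker(BlockingQueue<E> q) { this.queue = q; }
    public boolean block() throws InterruptedException {
        if (item == null)
            item = queue.take();  // may block; pool may add a spare worker
        return true;
    }
    public boolean isReleasable() { // avoid blocking if an item is ready
        return item != null || (item = queue.poll()) != null;
    }
    E getItem() { return item; }    // call after managedBlock completes
}

// usage, from code running in a ForkJoinPool:
//   QueueTaker<String> taker = new QueueTaker<String>(q);
//   ForkJoinPool.managedBlock(taker);
//   String s = taker.getItem();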
66 |
|
* |
67 |
|
* <p>In addition to execution and lifecycle control methods, this |
106 |
|
* </tr> |
107 |
|
* </table> |
108 |
|
* |
96 |
– |
* <p><b>Sample Usage.</b> Normally a single {@code ForkJoinPool} is |
97 |
– |
* used for all parallel task execution in a program or subsystem. |
98 |
– |
* Otherwise, use would not usually outweigh the construction and |
99 |
– |
* bookkeeping overhead of creating a large set of threads. For |
100 |
– |
* example, a common pool could be used for the {@code SortTasks} |
101 |
– |
* illustrated in {@link RecursiveAction}. Because {@code |
102 |
– |
* ForkJoinPool} uses threads in {@linkplain java.lang.Thread#isDaemon |
103 |
– |
* daemon} mode, there is typically no need to explicitly {@link |
104 |
– |
* #shutdown} such a pool upon program exit. |
105 |
– |
* |
106 |
– |
* <pre> {@code |
107 |
– |
* static final ForkJoinPool mainPool = new ForkJoinPool(); |
108 |
– |
* ... |
109 |
– |
* public void sort(long[] array) { |
110 |
– |
* mainPool.invoke(new SortTask(array, 0, array.length)); |
111 |
– |
* }}</pre> |
112 |
– |
* |
109 |
|
* <p><b>Implementation notes</b>: This implementation restricts the |
110 |
|
* maximum number of running threads to 32767. Attempts to create |
111 |
|
* pools with greater than the maximum number result in |
235 |
|
* when locked remains available to check consistency. |
236 |
|
* |
237 |
|
* Recording WorkQueues. WorkQueues are recorded in the |
238 |
< |
* "workQueues" array that is created upon pool construction and |
239 |
< |
* expanded if necessary. Updates to the array while recording |
240 |
< |
* new workers and unrecording terminated ones are protected from |
241 |
< |
* each other by a lock but the array is otherwise concurrently |
242 |
< |
* readable, and accessed directly. To simplify index-based |
243 |
< |
* operations, the array size is always a power of two, and all |
244 |
< |
* readers must tolerate null slots. Shared (submission) queues |
245 |
< |
* are at even indices, worker queues at odd indices. Grouping |
246 |
< |
* them together in this way simplifies and speeds up task |
251 |
< |
* scanning. |
238 |
> |
* "workQueues" array that is created upon first use and expanded |
239 |
> |
* if necessary. Updates to the array while recording new workers |
240 |
> |
* and unrecording terminated ones are protected from each other |
241 |
> |
* by a lock but the array is otherwise concurrently readable, and |
242 |
> |
* accessed directly. To simplify index-based operations, the |
243 |
> |
* array size is always a power of two, and all readers must |
244 |
> |
* tolerate null slots. Shared (submission) queues are at even |
245 |
> |
* indices, worker queues at odd indices. Grouping them together |
246 |
> |
* in this way simplifies and speeds up task scanning. |
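A sketch of the indexing convention just described (hypothetical helper; the real code computes these inline):

static int slotFor(int hash, int tableLength, boolean isWorker) {
    int m = tableLength - 1;        // tableLength is a power of two
    int r = isWorker ? (hash | 1)   // workers live at odd indices
                     : (hash & ~1); // submission queues at even indices
    return r & m;                   // the mask preserves the parity bit
}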
247 |
|
* |
248 |
|
* All worker thread creation is on-demand, triggered by task |
249 |
|
* submissions, replacement of terminated workers, and/or |
315 |
|
* |
316 |
|
* Trimming workers. To release resources after periods of lack of |
317 |
|
* use, a worker starting to wait when the pool is quiescent will |
318 |
< |
* time out and terminate if the pool has remained quiescent for |
319 |
< |
* SHRINK_RATE nanosecs. This will slowly propagate, eventually |
320 |
< |
* terminating all workers after long periods of non-use. |
318 |
> |
* time out and terminate if the pool has remained quiescent for a |
319 |
> |
* given period -- a short period if there are more threads than |
320 |
> |
* parallelism, longer as the number of threads decreases. This |
321 |
> |
* will slowly propagate, eventually terminating all workers after |
322 |
> |
* periods of non-use. |
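A sketch of that timeout schedule, using the IDLE_TIMEOUT and FAST_IDLE_TIMEOUT constants defined later in this file (the surplus computation here is a simplified stand-in for the ctl decoding in idleAwaitWork):

static long trimParkNanos(int totalThreads, int parallelism) {
    final long IDLE_TIMEOUT      = 1000L * 1000L * 1000L; // 1 sec
    final long FAST_IDLE_TIMEOUT =  100L * 1000L * 1000L; // 100 msec
    int dc = parallelism - totalThreads; // negative when threads exceed parallelism
    return (dc < 0) ? FAST_IDLE_TIMEOUT : (dc + 1) * IDLE_TIMEOUT;
}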
323 |
|
* |
324 |
|
* Shutdown and Termination. A call to shutdownNow atomically sets |
325 |
|
* a runState bit and then (non-atomically) sets each worker's |
410 |
|
* unblocked threads to the point that we know they are available) |
411 |
|
* leading to more situations requiring more threads, and so |
412 |
|
* on. This aspect of control can be seen as an (analytically |
413 |
< |
* intractible) game with an opponent that may choose the worst |
413 |
> |
* intractable) game with an opponent that may choose the worst |
414 |
|
* (for us) active thread to stall at any time. We take several |
415 |
|
* precautions to bound losses (and thus bound gains), mainly in |
416 |
|
* methods tryCompensate and awaitJoin: (1) We only try |
501 |
|
} |
502 |
|
|
503 |
|
/** |
507 |
– |
* A simple non-reentrant lock used for exclusion when managing |
508 |
– |
* queues and workers. We use a custom lock so that we can readily |
509 |
– |
* probe lock state in constructions that check among alternative |
510 |
– |
* actions. The lock is normally only very briefly held, and |
511 |
– |
* sometimes treated as a spinlock, but other usages block to |
512 |
– |
* reduce overall contention in those cases where locked code |
513 |
– |
* bodies perform allocation/resizing. |
514 |
– |
*/ |
515 |
– |
static final class Mutex extends AbstractQueuedSynchronizer { |
516 |
– |
public final boolean tryAcquire(int ignore) { |
517 |
– |
return compareAndSetState(0, 1); |
518 |
– |
} |
519 |
– |
public final boolean tryRelease(int ignore) { |
520 |
– |
setState(0); |
521 |
– |
return true; |
522 |
– |
} |
523 |
– |
public final void lock() { acquire(0); } |
524 |
– |
public final void unlock() { release(0); } |
525 |
– |
public final boolean isHeldExclusively() { return getState() == 1; } |
526 |
– |
public final Condition newCondition() { return new ConditionObject(); } |
527 |
– |
} |
528 |
– |
|
529 |
– |
/** |
504 |
|
* Class for artificial tasks that are used to replace the target |
505 |
|
* of local joins if they are removed from an interior queue slot |
506 |
|
* in WorkQueue.tryRemoveAndExec. We don't need the proxy to |
603 |
|
final ForkJoinPool pool; // the containing pool (may be null) |
604 |
|
final ForkJoinWorkerThread owner; // owning thread or null if shared |
605 |
|
volatile Thread parker; // == owner during call to park; else null |
606 |
< |
ForkJoinTask<?> currentJoin; // task being joined in awaitJoin |
606 |
> |
volatile ForkJoinTask<?> currentJoin; // task being joined in awaitJoin |
607 |
|
ForkJoinTask<?> currentSteal; // current non-local task being executed |
608 |
|
// Heuristic padding to ameliorate unfortunate memory placements |
609 |
|
Object p00, p01, p02, p03, p04, p05, p06, p07; |
695 |
|
* version of this method because it is never needed.) |
696 |
|
*/ |
697 |
|
final ForkJoinTask<?> pop() { |
698 |
< |
ForkJoinTask<?> t; int m; |
699 |
< |
ForkJoinTask<?>[] a = array; |
726 |
< |
if (a != null && (m = a.length - 1) >= 0) { |
698 |
> |
ForkJoinTask<?>[] a; ForkJoinTask<?> t; int m; |
699 |
> |
if ((a = array) != null && (m = a.length - 1) >= 0) { |
700 |
|
for (int s; (s = top - 1) - base >= 0;) { |
701 |
< |
int j = ((m & s) << ASHIFT) + ABASE; |
702 |
< |
if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) == null) |
701 |
> |
long j = ((m & s) << ASHIFT) + ABASE; |
702 |
> |
if ((t = (ForkJoinTask<?>)U.getObject(a, j)) == null) |
703 |
|
break; |
704 |
|
if (U.compareAndSwapObject(a, j, t, null)) { |
705 |
|
top = s; |
787 |
|
} |
788 |
|
|
789 |
|
/** |
790 |
+ |
* Version of tryUnpush for shared queues; called by non-FJ |
791 |
+ |
* submitters after prechecking that task probably exists. |
792 |
+ |
*/ |
793 |
+ |
final boolean trySharedUnpush(ForkJoinTask<?> t) { |
794 |
+ |
boolean success = false; |
795 |
+ |
if (runState == 0 && U.compareAndSwapInt(this, RUNSTATE, 0, 1)) { |
796 |
+ |
try { |
797 |
+ |
ForkJoinTask<?>[] a; int s; |
798 |
+ |
if ((a = array) != null && (s = top) != base && |
799 |
+ |
U.compareAndSwapObject |
800 |
+ |
(a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) { |
801 |
+ |
top = s; |
802 |
+ |
success = true; |
803 |
+ |
} |
804 |
+ |
} finally { |
805 |
+ |
runState = 0; // unlock |
806 |
+ |
} |
807 |
+ |
} |
808 |
+ |
return success; |
809 |
+ |
} |
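The unpush operations ultimately back ForkJoinTask.tryUnfork; a sketch of the caller-side pattern (standard ForkJoinTask API, with task a previously constructed ForkJoinTask):

task.fork();               // push onto a work or submission queue
// ... later, prefer running it in the current thread if still queued ...
if (task.tryUnfork())
    task.invoke();         // removed before anyone stole it; run directly
else
    task.join();           // already taken; wait for its completion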
810 |
+ |
|
811 |
+ |
/** |
812 |
|
* Polls the given task only if it is at the current base. |
813 |
|
*/ |
814 |
|
final boolean pollFor(ForkJoinTask<?> task) { |
825 |
|
} |
826 |
|
|
827 |
|
/** |
833 |
– |
* If present, removes from queue and executes the given task, or |
834 |
– |
* any other cancelled task. Returns (true) immediately on any CAS |
835 |
– |
* or consistency check failure so caller can retry. |
836 |
– |
* |
837 |
– |
* @return false if no progress can be made |
838 |
– |
*/ |
839 |
– |
final boolean tryRemoveAndExec(ForkJoinTask<?> task) { |
840 |
– |
boolean removed = false, empty = true, progress = true; |
841 |
– |
ForkJoinTask<?>[] a; int m, s, b, n; |
842 |
– |
if ((a = array) != null && (m = a.length - 1) >= 0 && |
843 |
– |
(n = (s = top) - (b = base)) > 0) { |
844 |
– |
for (ForkJoinTask<?> t;;) { // traverse from s to b |
845 |
– |
int j = ((--s & m) << ASHIFT) + ABASE; |
846 |
– |
t = (ForkJoinTask<?>)U.getObjectVolatile(a, j); |
847 |
– |
if (t == null) // inconsistent length |
848 |
– |
break; |
849 |
– |
else if (t == task) { |
850 |
– |
if (s + 1 == top) { // pop |
851 |
– |
if (!U.compareAndSwapObject(a, j, task, null)) |
852 |
– |
break; |
853 |
– |
top = s; |
854 |
– |
removed = true; |
855 |
– |
} |
856 |
– |
else if (base == b) // replace with proxy |
857 |
– |
removed = U.compareAndSwapObject(a, j, task, |
858 |
– |
new EmptyTask()); |
859 |
– |
break; |
860 |
– |
} |
861 |
– |
else if (t.status >= 0) |
862 |
– |
empty = false; |
863 |
– |
else if (s + 1 == top) { // pop and throw away |
864 |
– |
if (U.compareAndSwapObject(a, j, t, null)) |
865 |
– |
top = s; |
866 |
– |
break; |
867 |
– |
} |
868 |
– |
if (--n == 0) { |
869 |
– |
if (!empty && base == b) |
870 |
– |
progress = false; |
871 |
– |
break; |
872 |
– |
} |
873 |
– |
} |
874 |
– |
} |
875 |
– |
if (removed) |
876 |
– |
task.doExec(); |
877 |
– |
return progress; |
878 |
– |
} |
879 |
– |
|
880 |
– |
/** |
828 |
|
* Initializes or doubles the capacity of array. Call either |
829 |
|
* by owner or with lock held -- it is OK for base, but not |
830 |
|
* top, to move while resizings are in progress. |
886 |
|
// Execution methods |
887 |
|
|
888 |
|
/** |
889 |
< |
* Removes and runs tasks until empty, using local mode |
890 |
< |
* ordering. Normally called only after checking for apparent |
891 |
< |
* non-emptiness. |
892 |
< |
*/ |
893 |
< |
final void runLocalTasks() { |
894 |
< |
// hoist checks from repeated pop/poll |
895 |
< |
ForkJoinTask<?>[] a; int m; |
896 |
< |
if ((a = array) != null && (m = a.length - 1) >= 0) { |
897 |
< |
if (mode == 0) { |
898 |
< |
for (int s; (s = top - 1) - base >= 0;) { |
899 |
< |
int j = ((m & s) << ASHIFT) + ABASE; |
900 |
< |
ForkJoinTask<?> t = |
901 |
< |
(ForkJoinTask<?>)U.getObjectVolatile(a, j); |
955 |
< |
if (t != null) { |
956 |
< |
if (U.compareAndSwapObject(a, j, t, null)) { |
957 |
< |
top = s; |
958 |
< |
t.doExec(); |
959 |
< |
} |
960 |
< |
} |
961 |
< |
else |
962 |
< |
break; |
963 |
< |
} |
889 |
> |
* Pops and runs tasks until empty. |
890 |
> |
*/ |
891 |
> |
private void popAndExecAll() { |
892 |
> |
// A bit faster than repeated pop calls |
893 |
> |
ForkJoinTask<?>[] a; int m, s; long j; ForkJoinTask<?> t; |
894 |
> |
while ((a = array) != null && (m = a.length - 1) >= 0 && |
895 |
> |
(s = top - 1) - base >= 0 && |
896 |
> |
(t = ((ForkJoinTask<?>) |
897 |
> |
U.getObject(a, j = ((m & s) << ASHIFT) + ABASE))) |
898 |
> |
!= null) { |
899 |
> |
if (U.compareAndSwapObject(a, j, t, null)) { |
900 |
> |
top = s; |
901 |
> |
t.doExec(); |
902 |
|
} |
903 |
< |
else { |
904 |
< |
for (int b; (b = base) - top < 0;) { |
905 |
< |
int j = ((m & b) << ASHIFT) + ABASE; |
906 |
< |
ForkJoinTask<?> t = |
907 |
< |
(ForkJoinTask<?>)U.getObjectVolatile(a, j); |
908 |
< |
if (t != null) { |
909 |
< |
if (base == b && |
910 |
< |
U.compareAndSwapObject(a, j, t, null)) { |
911 |
< |
base = b + 1; |
912 |
< |
t.doExec(); |
913 |
< |
} |
914 |
< |
} else if (base == b) { |
915 |
< |
if (b + 1 == top) |
903 |
> |
} |
904 |
> |
} |
905 |
> |
|
906 |
> |
/** |
907 |
> |
* Polls and runs tasks until empty. |
908 |
> |
*/ |
909 |
> |
private void pollAndExecAll() { |
910 |
> |
for (ForkJoinTask<?> t; (t = poll()) != null;) |
911 |
> |
t.doExec(); |
912 |
> |
} |
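The two drain loops above correspond to the pool's local mode: mode == 0 pops (LIFO, the default), while async-mode pools poll (FIFO). A sketch of constructing a FIFO pool for event-style tasks via the public constructor:

ForkJoinPool eventPool = new ForkJoinPool(
    Runtime.getRuntime().availableProcessors(),
    ForkJoinPool.defaultForkJoinWorkerThreadFactory,
    null,    // default handling of worker exceptions
    true);   // asyncMode: FIFO local scheduling for tasks never joined
eventPool.execute(new Runnable() {
    public void run() { System.out.println("fire-and-forget"); }
});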
913 |
> |
|
914 |
> |
/** |
915 |
> |
* If present, removes from queue and executes the given task, or |
916 |
> |
* any other cancelled task. Returns (positive) immediately on any CAS |
917 |
> |
* or consistency check failure so caller can retry. |
918 |
> |
* |
919 |
> |
* @return 0 if no progress can be made, else positive |
920 |
> |
* (this unusual convention simplifies use with tryHelpStealer.) |
921 |
> |
*/ |
922 |
> |
final int tryRemoveAndExec(ForkJoinTask<?> task) { |
923 |
> |
int stat = 1; |
924 |
> |
boolean removed = false, empty = true; |
925 |
> |
ForkJoinTask<?>[] a; int m, s, b, n; |
926 |
> |
if ((a = array) != null && (m = a.length - 1) >= 0 && |
927 |
> |
(n = (s = top) - (b = base)) > 0) { |
928 |
> |
for (ForkJoinTask<?> t;;) { // traverse from s to b |
929 |
> |
int j = ((--s & m) << ASHIFT) + ABASE; |
930 |
> |
t = (ForkJoinTask<?>)U.getObjectVolatile(a, j); |
931 |
> |
if (t == null) // inconsistent length |
932 |
> |
break; |
933 |
> |
else if (t == task) { |
934 |
> |
if (s + 1 == top) { // pop |
935 |
> |
if (!U.compareAndSwapObject(a, j, task, null)) |
936 |
|
break; |
937 |
< |
Thread.yield(); // wait for lagging update |
937 |
> |
top = s; |
938 |
> |
removed = true; |
939 |
|
} |
940 |
+ |
else if (base == b) // replace with proxy |
941 |
+ |
removed = U.compareAndSwapObject(a, j, task, |
942 |
+ |
new EmptyTask()); |
943 |
+ |
break; |
944 |
+ |
} |
945 |
+ |
else if (t.status >= 0) |
946 |
+ |
empty = false; |
947 |
+ |
else if (s + 1 == top) { // pop and throw away |
948 |
+ |
if (U.compareAndSwapObject(a, j, t, null)) |
949 |
+ |
top = s; |
950 |
+ |
break; |
951 |
+ |
} |
952 |
+ |
if (--n == 0) { |
953 |
+ |
if (!empty && base == b) |
954 |
+ |
stat = 0; |
955 |
+ |
break; |
956 |
|
} |
957 |
|
} |
958 |
|
} |
959 |
+ |
if (removed) |
960 |
+ |
task.doExec(); |
961 |
+ |
return stat; |
962 |
|
} |
963 |
|
|
964 |
|
/** |
965 |
|
* Executes a top-level task and any local tasks remaining |
966 |
|
* after execution. |
989 |
– |
* |
990 |
– |
* @return true unless terminating |
967 |
|
*/ |
968 |
< |
final boolean runTask(ForkJoinTask<?> t) { |
993 |
< |
boolean alive = true; |
968 |
> |
final void runTask(ForkJoinTask<?> t) { |
969 |
|
if (t != null) { |
970 |
|
currentSteal = t; |
971 |
|
t.doExec(); |
972 |
< |
if (top != base) // conservative guard |
973 |
< |
runLocalTasks(); |
972 |
> |
if (top != base) { // process remaining local tasks |
973 |
> |
if (mode == 0) |
974 |
> |
popAndExecAll(); |
975 |
> |
else |
976 |
> |
pollAndExecAll(); |
977 |
> |
} |
978 |
|
++nsteals; |
979 |
|
currentSteal = null; |
980 |
|
} |
1002 |
– |
else if (runState < 0) // terminating |
1003 |
– |
alive = false; |
1004 |
– |
return alive; |
981 |
|
} |
982 |
|
|
983 |
|
/** |
1082 |
|
public static final ForkJoinWorkerThreadFactory |
1083 |
|
defaultForkJoinWorkerThreadFactory; |
1084 |
|
|
1085 |
+ |
|
1086 |
+ |
/** Property prefix for constructing common pool */ |
1087 |
+ |
private static final String propPrefix = |
1088 |
+ |
"java.util.concurrent.ForkJoinPool.common."; |
1089 |
+ |
|
1090 |
+ |
/** |
1091 |
+ |
* Common (static) pool. Non-null for public use unless a static |
1092 |
+ |
* construction exception, but internal usages must null-check on |
1093 |
+ |
* use. |
1094 |
+ |
*/ |
1095 |
+ |
static final ForkJoinPool commonPool; |
1096 |
+ |
|
1097 |
+ |
/** |
1098 |
+ |
* Common pool parallelism. Must equal commonPool.parallelism. |
1099 |
+ |
*/ |
1100 |
+ |
static final int commonPoolParallelism; |
1101 |
+ |
|
1102 |
|
/** |
1103 |
|
* Generator for assigning sequence numbers as pool names. |
1104 |
|
*/ |
1117 |
|
private static final RuntimePermission modifyThreadPermission; |
1118 |
|
|
1119 |
|
/** |
1120 |
< |
* Per-thread submission bookeeping. Shared across all pools |
1120 |
> |
* Per-thread submission bookkeeping. Shared across all pools |
1121 |
|
* to reduce ThreadLocal pollution and because random motion |
1122 |
|
* to avoid contention in one pool is likely to hold for others. |
1123 |
|
*/ |
1126 |
|
// static constants |
1127 |
|
|
1128 |
|
/** |
1129 |
< |
* The wakeup interval (in nanoseconds) for a worker waiting for a |
1130 |
< |
* task when the pool is quiescent to instead try to shrink the |
1131 |
< |
* number of workers. The exact value does not matter too |
1139 |
< |
* much. It must be short enough to release resources during |
1140 |
< |
* sustained periods of idleness, but not so short that threads |
1141 |
< |
* are continually re-created. |
1129 |
> |
* Initial timeout value (in nanoseconds) for the thread triggering |
1130 |
> |
* quiescence to park waiting for new work. On timeout, the thread |
1131 |
> |
* will instead try to shrink the number of workers. |
1132 |
|
*/ |
1133 |
< |
private static final long SHRINK_RATE = |
1144 |
< |
4L * 1000L * 1000L * 1000L; // 4 seconds |
1133 |
> |
private static final long IDLE_TIMEOUT = 1000L * 1000L * 1000L; // 1sec |
1134 |
|
|
1135 |
|
/** |
1136 |
< |
* The timeout value for attempted shrinkage, includes |
1148 |
< |
* some slop to cope with system timer imprecision. |
1136 |
> |
* Timeout value when there are more threads than parallelism level |
1137 |
|
*/ |
1138 |
< |
private static final long SHRINK_TIMEOUT = SHRINK_RATE - (SHRINK_RATE / 10); |
1138 |
> |
private static final long FAST_IDLE_TIMEOUT = 100L * 1000L * 1000L; |
1139 |
|
|
1140 |
|
/** |
1141 |
|
* The maximum stolen->joining link depth allowed in method |
1148 |
|
* traversal parameters at the expense of sometimes blocking when |
1149 |
|
* we could be helping. |
1150 |
|
*/ |
1151 |
< |
private static final int MAX_HELP = 32; |
1151 |
> |
private static final int MAX_HELP = 64; |
1152 |
|
|
1153 |
|
/** |
1154 |
|
* Secondary time-based bound (in nanosecs) for helping attempts |
1158 |
|
* value should roughly approximate the time required to create |
1159 |
|
* and/or activate a worker thread. |
1160 |
|
*/ |
1161 |
< |
private static final long COMPENSATION_DELAY = 100L * 1000L; // 0.1 millisec |
1161 |
> |
private static final long COMPENSATION_DELAY = 1L << 18; // ~0.25 millisec |
1162 |
|
|
1163 |
|
/** |
1164 |
|
* Increment for seed generators. See class ThreadLocal for |
1255 |
|
* empirically works OK on current JVMs. |
1256 |
|
*/ |
1257 |
|
|
1258 |
+ |
volatile long stealCount; // collects worker counts |
1259 |
|
volatile long ctl; // main pool control |
1260 |
|
final int parallelism; // parallelism level |
1261 |
|
final int localMode; // per-worker scheduling mode |
1262 |
+ |
volatile int nextWorkerNumber; // to create worker name string |
1263 |
|
final int submitMask; // submit queue index bound |
1264 |
|
int nextSeed; // for initializing worker seeds |
1265 |
+ |
volatile int mainLock; // spinlock for array updates |
1266 |
|
volatile int runState; // shutdown status and seq |
1267 |
|
WorkQueue[] workQueues; // main registry |
1277 |
– |
final Mutex lock; // for registration |
1278 |
– |
final Condition termination; // for awaitTermination |
1268 |
|
final ForkJoinWorkerThreadFactory factory; // factory for new workers |
1269 |
|
final Thread.UncaughtExceptionHandler ueh; // per-worker UEH |
1281 |
– |
final AtomicLong stealCount; // collect counts when terminated |
1282 |
– |
final AtomicInteger nextWorkerNumber; // to create worker name string |
1270 |
|
final String workerNamePrefix; // to create worker name string |
1271 |
|
|
1272 |
+ |
/* |
1273 |
+ |
* Mechanics for main lock protecting worker array updates. Uses |
1274 |
+ |
* the same strategy as ConcurrentHashMap bins -- a spinLock for |
1275 |
+ |
* normal cases, but falling back to builtin lock when (rarely) |
1276 |
+ |
* needed. See internal ConcurrentHashMap documentation for |
1277 |
+ |
* explanation. |
1278 |
+ |
*/ |
1279 |
+ |
|
1280 |
+ |
static final int LOCK_WAITING = 2; // bit to indicate need for signal |
1281 |
+ |
static final int MAX_LOCK_SPINS = 1 << 8; |
1282 |
+ |
|
1283 |
+ |
private void tryAwaitMainLock() { |
1284 |
+ |
int spins = MAX_LOCK_SPINS, r = 0, h; |
1285 |
+ |
while (((h = mainLock) & 1) != 0) { |
1286 |
+ |
if (r == 0) |
1287 |
+ |
r = ThreadLocalRandom.current().nextInt(); // randomize spins |
1288 |
+ |
else if (spins >= 0) { |
1289 |
+ |
r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift |
1290 |
+ |
if (r >= 0) |
1291 |
+ |
--spins; |
1292 |
+ |
} |
1293 |
+ |
else if (U.compareAndSwapInt(this, MAINLOCK, h, h | LOCK_WAITING)) { |
1294 |
+ |
synchronized (this) { |
1295 |
+ |
if ((mainLock & LOCK_WAITING) != 0) { |
1296 |
+ |
try { |
1297 |
+ |
wait(); |
1298 |
+ |
} catch (InterruptedException ie) { |
1299 |
+ |
Thread.currentThread().interrupt(); |
1300 |
+ |
} |
1301 |
+ |
} |
1302 |
+ |
else |
1303 |
+ |
notifyAll(); // possibly won race vs signaller |
1304 |
+ |
} |
1305 |
+ |
break; |
1306 |
+ |
} |
1307 |
+ |
} |
1308 |
+ |
} |
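The same spin-then-block pattern, reduced to a standalone sketch with AtomicInteger standing in for the Unsafe-based field access (hypothetical class, not the pool's actual lock; omits the xorshift spin randomization):

import java.util.concurrent.atomic.AtomicInteger;

final class SpinThenBlockLock {
    static final int HELD = 1, WAITING = 2;
    final AtomicInteger state = new AtomicInteger();

    void lock() {
        int spins = 1 << 8;
        while (!state.compareAndSet(0, HELD)) {
            if (spins-- > 0)
                continue;                 // spin briefly before blocking
            int s = state.get();
            if ((s & HELD) != 0 && state.compareAndSet(s, s | WAITING)) {
                synchronized (this) {     // fall back to builtin lock
                    if ((state.get() & WAITING) != 0) {
                        try {
                            wait();
                        } catch (InterruptedException ie) {
                            Thread.currentThread().interrupt();
                        }
                    }
                }
            }
        }
    }

    void unlock() {
        if (!state.compareAndSet(HELD, 0)) { // a waiter is recorded
            state.set(0);
            synchronized (this) { notifyAll(); }
        }
    }
}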
1309 |
+ |
|
1310 |
|
// Creating, registering, and deregistering workers |
1311 |
|
|
1312 |
|
/** |
1333 |
|
* ForkJoinWorkerThread. |
1334 |
|
*/ |
1335 |
|
final String nextWorkerName() { |
1336 |
< |
return workerNamePrefix.concat |
1337 |
< |
(Integer.toString(nextWorkerNumber.addAndGet(1))); |
1336 |
> |
int n; |
1337 |
> |
do {} while (!U.compareAndSwapInt(this, NEXTWORKERNUMBER, |
1338 |
> |
n = nextWorkerNumber, ++n)); |
1339 |
> |
return workerNamePrefix.concat(Integer.toString(n)); |
1340 |
|
} |
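The loop above replaces the former AtomicInteger field with a bare volatile int plus CAS, trimming the pool's footprint. The same idiom via AtomicIntegerFieldUpdater, as a sketch (hypothetical class; the real code uses Unsafe directly):

import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;

class WorkerNames {
    volatile int nextWorkerNumber;
    static final AtomicIntegerFieldUpdater<WorkerNames> NWN =
        AtomicIntegerFieldUpdater.newUpdater(WorkerNames.class,
                                             "nextWorkerNumber");
    String next(String prefix) {
        int n;
        do {} while (!NWN.compareAndSet(this, n = nextWorkerNumber, n + 1));
        return prefix.concat(Integer.toString(n + 1));
    }
}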
1341 |
|
|
1342 |
|
/** |
1349 |
|
* @param w the worker's queue |
1350 |
|
*/ |
1351 |
|
final void registerWorker(WorkQueue w) { |
1352 |
< |
Mutex lock = this.lock; |
1353 |
< |
lock.lock(); |
1352 |
> |
while (!U.compareAndSwapInt(this, MAINLOCK, 0, 1)) |
1353 |
> |
tryAwaitMainLock(); |
1354 |
|
try { |
1355 |
< |
WorkQueue[] ws = workQueues; |
1356 |
< |
if (w != null && ws != null) { // skip on shutdown/failure |
1357 |
< |
int rs, n; |
1358 |
< |
while ((n = ws.length) < // ensure can hold total |
1359 |
< |
(parallelism + (short)(ctl >>> TC_SHIFT) << 1)) |
1333 |
< |
workQueues = ws = Arrays.copyOf(ws, n << 1); |
1334 |
< |
int m = n - 1; |
1355 |
> |
WorkQueue[] ws; |
1356 |
> |
if ((ws = workQueues) == null) |
1357 |
> |
ws = workQueues = new WorkQueue[submitMask + 1]; |
1358 |
> |
if (w != null) { |
1359 |
> |
int rs, n = ws.length, m = n - 1; |
1360 |
|
int s = nextSeed += SEED_INCREMENT; // rarely-colliding sequence |
1361 |
|
w.seed = (s == 0) ? 1 : s; // ensure non-zero seed |
1362 |
|
int r = (s << 1) | 1; // use odd-numbered indices |
1363 |
< |
while (ws[r &= m] != null) // step by approx half size |
1364 |
< |
r += ((n >>> 1) & SQMASK) + 2; |
1363 |
> |
if (ws[r &= m] != null) { // collision |
1364 |
> |
int probes = 0; // step by approx half size |
1365 |
> |
int step = (n <= 4) ? 2 : ((n >>> 1) & SQMASK) + 2; |
1366 |
> |
while (ws[r = (r + step) & m] != null) { |
1367 |
> |
if (++probes >= n) { |
1368 |
> |
workQueues = ws = Arrays.copyOf(ws, n <<= 1); |
1369 |
> |
m = n - 1; |
1370 |
> |
probes = 0; |
1371 |
> |
} |
1372 |
> |
} |
1373 |
> |
} |
1374 |
|
w.eventCount = w.poolIndex = r; // establish before recording |
1375 |
|
ws[r] = w; // also update seq |
1376 |
|
runState = ((rs = runState) & SHUTDOWN) | ((rs + 2) & ~SHUTDOWN); |
1377 |
|
} |
1378 |
|
} finally { |
1379 |
< |
lock.unlock(); |
1379 |
> |
if (!U.compareAndSwapInt(this, MAINLOCK, 1, 0)) { |
1380 |
> |
mainLock = 0; |
1381 |
> |
synchronized (this) { notifyAll(); }; |
1382 |
> |
} |
1383 |
|
} |
1384 |
+ |
|
1385 |
|
} |
1386 |
|
|
1387 |
|
/** |
1394 |
|
* @param ex the exception causing failure, or null if none |
1395 |
|
*/ |
1396 |
|
final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) { |
1359 |
– |
Mutex lock = this.lock; |
1397 |
|
WorkQueue w = null; |
1398 |
|
if (wt != null && (w = wt.workQueue) != null) { |
1399 |
|
w.runState = -1; // ensure runState is set |
1400 |
< |
stealCount.getAndAdd(w.totalSteals + w.nsteals); |
1400 |
> |
long steals = w.totalSteals + w.nsteals, sc; |
1401 |
> |
do {} while (!U.compareAndSwapLong(this, STEALCOUNT, |
1402 |
> |
sc = stealCount, sc + steals)); |
1403 |
|
int idx = w.poolIndex; |
1404 |
< |
lock.lock(); |
1405 |
< |
try { // remove record from array |
1404 |
> |
while (!U.compareAndSwapInt(this, MAINLOCK, 0, 1)) |
1405 |
> |
tryAwaitMainLock(); |
1406 |
> |
try { |
1407 |
|
WorkQueue[] ws = workQueues; |
1408 |
|
if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w) |
1409 |
|
ws[idx] = null; |
1410 |
|
} finally { |
1411 |
< |
lock.unlock(); |
1411 |
> |
if (!U.compareAndSwapInt(this, MAINLOCK, 1, 0)) { |
1412 |
> |
mainLock = 0; |
1413 |
> |
synchronized (this) { notifyAll(); }; |
1414 |
> |
} |
1415 |
|
} |
1416 |
|
} |
1417 |
|
|
1433 |
|
U.throwException(ex); |
1434 |
|
} |
1435 |
|
|
1393 |
– |
|
1436 |
|
// Submissions |
1437 |
|
|
1438 |
|
/** |
1441 |
|
* range). If no queue exists at the index, one is created. If |
1442 |
|
* the queue is busy, another index is randomly chosen. The |
1443 |
|
* submitMask bounds the effective number of queues to the |
1444 |
< |
* (nearest poswer of two for) parallelism level. |
1444 |
> |
* (nearest power of two for) parallelism level. |
1445 |
|
* |
1446 |
|
* @param task the task. Caller must ensure non-null. |
1447 |
|
*/ |
1450 |
|
for (int r = s.seed, m = submitMask;;) { |
1451 |
|
WorkQueue[] ws; WorkQueue q; |
1452 |
|
int k = r & m & SQMASK; // use only even indices |
1453 |
< |
if (runState < 0 || (ws = workQueues) == null || ws.length <= k) |
1453 |
> |
if (runState < 0) |
1454 |
|
throw new RejectedExecutionException(); // shutting down |
1455 |
+ |
else if ((ws = workQueues) == null || ws.length <= k) { |
1456 |
+ |
while (!U.compareAndSwapInt(this, MAINLOCK, 0, 1)) |
1457 |
+ |
tryAwaitMainLock(); |
1458 |
+ |
try { |
1459 |
+ |
if (workQueues == null) |
1460 |
+ |
workQueues = new WorkQueue[submitMask + 1]; |
1461 |
+ |
} finally { |
1462 |
+ |
if (!U.compareAndSwapInt(this, MAINLOCK, 1, 0)) { |
1463 |
+ |
mainLock = 0; |
1464 |
+ |
synchronized (this) { notifyAll(); }; |
1465 |
+ |
} |
1466 |
+ |
} |
1467 |
+ |
} |
1468 |
|
else if ((q = ws[k]) == null) { // create new queue |
1469 |
|
WorkQueue nq = new WorkQueue(this, null, SHARED_QUEUE); |
1470 |
< |
Mutex lock = this.lock; // construct outside lock |
1471 |
< |
lock.lock(); |
1472 |
< |
try { // recheck under lock |
1470 |
> |
while (!U.compareAndSwapInt(this, MAINLOCK, 0, 1)) |
1471 |
> |
tryAwaitMainLock(); |
1472 |
> |
try { |
1473 |
|
int rs = runState; // to update seq |
1474 |
|
if (ws == workQueues && ws[k] == null) { |
1475 |
|
ws[k] = nq; |
1476 |
|
runState = ((rs & SHUTDOWN) | ((rs + 2) & ~SHUTDOWN)); |
1477 |
|
} |
1478 |
|
} finally { |
1479 |
< |
lock.unlock(); |
1479 |
> |
if (!U.compareAndSwapInt(this, MAINLOCK, 1, 0)) { |
1480 |
> |
mainLock = 0; |
1481 |
> |
synchronized (this) { notifyAll(); }; |
1482 |
> |
} |
1483 |
|
} |
1484 |
|
} |
1485 |
|
else if (q.trySharedPush(task)) { |
1496 |
|
} |
1497 |
|
} |
1498 |
|
|
1499 |
+ |
/** |
1500 |
+ |
* Submits the given (non-null) task to the common pool, if possible. |
1501 |
+ |
*/ |
1502 |
+ |
static void submitToCommonPool(ForkJoinTask<?> task) { |
1503 |
+ |
ForkJoinPool p; |
1504 |
+ |
if ((p = commonPool) == null) |
1505 |
+ |
throw new RejectedExecutionException("Common Pool Unavailable"); |
1506 |
+ |
p.doSubmit(task); |
1507 |
+ |
} |
1508 |
+ |
|
1509 |
+ |
/** |
1510 |
+ |
* Returns true if the given task was submitted to the common pool |
1511 |
+ |
* and has not yet commenced execution, and is available for |
1512 |
+ |
* removal according to execution policies; if so, removing the |
1513 |
+ |
* submission from the pool. |
1514 |
+ |
* |
1515 |
+ |
* @param task the task |
1516 |
+ |
* @return true if successful |
1517 |
+ |
*/ |
1518 |
+ |
static boolean tryUnsubmitFromCommonPool(ForkJoinTask<?> task) { |
1519 |
+ |
// Peek, looking for task and eligibility before |
1520 |
+ |
// using trySharedUnpush to actually take it under lock |
1521 |
+ |
ForkJoinPool p; WorkQueue[] ws; WorkQueue q; |
1522 |
+ |
ForkJoinTask<?>[] a; int t, s, n; |
1523 |
+ |
int k = submitters.get().seed & SQMASK; |
1524 |
+ |
return ((p = commonPool) != null && |
1525 |
+ |
(ws = p.workQueues) != null && |
1526 |
+ |
ws.length > (k &= p.submitMask) && |
1527 |
+ |
(q = ws[k]) != null && |
1528 |
+ |
(a = q.array) != null && |
1529 |
+ |
(n = (t = q.top) - q.base) > 0 && |
1530 |
+ |
(n > 1 || (int)(p.ctl >> AC_SHIFT) < 0) && |
1531 |
+ |
(s = t - 1) >= 0 && s < a.length && a[s] == task && |
1532 |
+ |
q.trySharedUnpush(task)); |
1533 |
+ |
} |
1534 |
+ |
|
1535 |
|
// Maintaining ctl counts |
1536 |
|
|
1537 |
|
/** |
1543 |
|
} |
1544 |
|
|
1545 |
|
/** |
1546 |
< |
* Tries to activate or create a worker if too few are active. |
1546 |
> |
* Tries to create one or activate one or more workers if too few are active. |
1547 |
|
*/ |
1548 |
|
final void signalWork() { |
1549 |
|
long c; int u; |
1577 |
|
} |
1578 |
|
} |
1579 |
|
|
1486 |
– |
|
1580 |
|
// Scanning for tasks |
1581 |
|
|
1582 |
|
/** |
1584 |
|
*/ |
1585 |
|
final void runWorker(WorkQueue w) { |
1586 |
|
w.growArray(false); // initialize queue array in this thread |
1587 |
< |
do {} while (w.runTask(scan(w))); |
1587 |
> |
do { w.runTask(scan(w)); } while (w.runState >= 0); |
1588 |
|
} |
1589 |
|
|
1590 |
|
/** |
1627 |
|
* awaiting signal, |
1628 |
|
* |
1629 |
|
* @param w the worker (via its WorkQueue) |
1630 |
< |
* @return a task or null of none found |
1630 |
> |
* @return a task or null if none found |
1631 |
|
*/ |
1632 |
|
private final ForkJoinTask<?> scan(WorkQueue w) { |
1633 |
|
WorkQueue[] ws; // first update random seed |
1644 |
|
t = (ForkJoinTask<?>)U.getObjectVolatile(a, i); |
1645 |
|
if (q.base == b && ec >= 0 && t != null && |
1646 |
|
U.compareAndSwapObject(a, i, t, null)) { |
1647 |
< |
q.base = b + 1; // specialization of pollAt |
1647 |
> |
if (q.top - (q.base = b + 1) > 0) |
1648 |
> |
signalWork(); // help pushes signal |
1649 |
|
return t; |
1650 |
|
} |
1651 |
< |
else if ((t != null || b + 1 != q.top) && |
1558 |
< |
(ec < 0 || j <= m)) { |
1651 |
> |
else if (ec < 0 || j <= m) { |
1652 |
|
rs = 0; // mark scan as incomplete |
1653 |
|
break; // caller can retry after release |
1654 |
|
} |
1656 |
|
if (--j < 0) |
1657 |
|
break; |
1658 |
|
} |
1659 |
+ |
|
1660 |
|
long c = ctl; int e = (int)c, a = (int)(c >> AC_SHIFT), nr, ns; |
1661 |
|
if (e < 0) // decode ctl on empty scan |
1662 |
|
w.runState = -1; // pool is terminating |
1682 |
|
else { |
1683 |
|
if ((ns = w.nsteals) != 0) { |
1684 |
|
w.nsteals = 0; // set rescans if ran task |
1685 |
< |
w.rescans = (a > 0)? 0 : a + parallelism; |
1685 |
> |
w.rescans = (a > 0) ? 0 : a + parallelism; |
1686 |
|
w.totalSteals += ns; |
1687 |
|
} |
1688 |
|
if (a == 1 - parallelism) // quiescent |
1690 |
|
} |
1691 |
|
} |
1692 |
|
else if (w.eventCount < 0) { // already queued |
1693 |
< |
if ((nr = w.rescans) > 0) { // continue rescanning |
1694 |
< |
int ac = a + parallelism; |
1695 |
< |
if (((w.rescans = (ac < nr) ? ac : nr - 1) & 3) == 0) |
1696 |
< |
Thread.yield(); // yield before block |
1603 |
< |
} |
1604 |
< |
else { |
1693 |
> |
int ac = a + parallelism; |
1694 |
> |
if ((nr = w.rescans) > 0) // continue rescanning |
1695 |
> |
w.rescans = (ac < nr) ? ac : nr - 1; |
1696 |
> |
else if (((w.seed >>> 16) & ac) == 0) { // randomize park |
1697 |
|
Thread.interrupted(); // clear status |
1698 |
|
Thread wt = Thread.currentThread(); |
1699 |
|
U.putObject(wt, PARKBLOCKER, this); |
1711 |
|
/** |
1712 |
|
* If inactivating worker w has caused the pool to become |
1713 |
|
* quiescent, checks for pool termination, and, so long as this is |
1714 |
< |
* not the only worker, waits for event for up to SHRINK_RATE |
1715 |
< |
* nanosecs. On timeout, if ctl has not changed, terminates the |
1714 |
> |
* not the only worker, waits for event for up to a given |
1715 |
> |
* duration. On timeout, if ctl has not changed, terminates the |
1716 |
|
* worker, which will in turn wake up another worker to possibly |
1717 |
|
* repeat this process. |
1718 |
|
* |
1722 |
|
*/ |
1723 |
|
private void idleAwaitWork(WorkQueue w, long currentCtl, long prevCtl) { |
1724 |
|
if (w.eventCount < 0 && !tryTerminate(false, false) && |
1725 |
< |
(int)prevCtl != 0 && ctl == currentCtl) { |
1725 |
> |
(int)prevCtl != 0 && !hasQueuedSubmissions() && ctl == currentCtl) { |
1726 |
> |
int dc = -(short)(currentCtl >>> TC_SHIFT); |
1727 |
> |
long parkTime = dc < 0 ? FAST_IDLE_TIMEOUT : (dc + 1) * IDLE_TIMEOUT; |
1728 |
> |
long deadline = System.nanoTime() + parkTime - 100000L; // 0.1ms slop |
1729 |
|
Thread wt = Thread.currentThread(); |
1635 |
– |
Thread.yield(); // yield before block |
1730 |
|
while (ctl == currentCtl) { |
1637 |
– |
long startTime = System.nanoTime(); |
1731 |
|
Thread.interrupted(); // timed variant of version in scan() |
1732 |
|
U.putObject(wt, PARKBLOCKER, this); |
1733 |
|
w.parker = wt; |
1734 |
|
if (ctl == currentCtl) |
1735 |
< |
U.park(false, SHRINK_RATE); |
1735 |
> |
U.park(false, parkTime); |
1736 |
|
w.parker = null; |
1737 |
|
U.putObject(wt, PARKBLOCKER, null); |
1738 |
|
if (ctl != currentCtl) |
1739 |
|
break; |
1740 |
< |
if (System.nanoTime() - startTime >= SHRINK_TIMEOUT && |
1740 |
> |
if (deadline - System.nanoTime() <= 0L && |
1741 |
|
U.compareAndSwapLong(this, CTL, currentCtl, prevCtl)) { |
1742 |
|
w.eventCount = (w.eventCount + E_SEQ) | E_MASK; |
1743 |
|
w.runState = -1; // shrink |
1758 |
|
* leaves hints in workers to speed up subsequent calls. The |
1759 |
|
* implementation is very branchy to cope with potential |
1760 |
|
* inconsistencies or loops encountering chains that are stale, |
1761 |
< |
* unknown, or so long that they are likely cyclic. All of these |
1669 |
< |
* cases are dealt with by just retrying by caller. |
1761 |
> |
* unknown, or so long that they are likely cyclic. |
1762 |
|
* |
1763 |
|
* @param joiner the joining worker |
1764 |
|
* @param task the task to join |
1765 |
< |
* @return true if found or ran a task (and so is immediately retryable) |
1765 |
> |
* @return 0 if no progress can be made, negative if task |
1766 |
> |
* known complete, else positive |
1767 |
|
*/ |
1768 |
< |
private boolean tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) { |
1769 |
< |
WorkQueue[] ws; |
1770 |
< |
int m, depth = MAX_HELP; // remaining chain depth |
1771 |
< |
boolean progress = false; |
1772 |
< |
if ((ws = workQueues) != null && (m = ws.length - 1) > 0 && |
1773 |
< |
task.status >= 0) { |
1774 |
< |
ForkJoinTask<?> subtask = task; // current target |
1775 |
< |
outer: for (WorkQueue j = joiner;;) { |
1776 |
< |
WorkQueue stealer = null; // find stealer of subtask |
1777 |
< |
WorkQueue v = ws[j.stealHint & m]; // try hint |
1778 |
< |
if (v != null && v.currentSteal == subtask) |
1779 |
< |
stealer = v; |
1780 |
< |
else { // scan |
1781 |
< |
for (int i = 1; i <= m; i += 2) { |
1782 |
< |
if ((v = ws[i]) != null && v.currentSteal == subtask && |
1783 |
< |
v != joiner) { |
1784 |
< |
stealer = v; |
1785 |
< |
j.stealHint = i; // save hint |
1786 |
< |
break; |
1768 |
> |
private int tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) { |
1769 |
> |
int stat = 0, steps = 0; // bound to avoid cycles |
1770 |
> |
if (joiner != null && task != null) { // hoist null checks |
1771 |
> |
restart: for (;;) { |
1772 |
> |
ForkJoinTask<?> subtask = task; // current target |
1773 |
> |
for (WorkQueue j = joiner, v;;) { // v is stealer of subtask |
1774 |
> |
WorkQueue[] ws; int m, s, h; |
1775 |
> |
if ((s = task.status) < 0) { |
1776 |
> |
stat = s; |
1777 |
> |
break restart; |
1778 |
> |
} |
1779 |
> |
if ((ws = workQueues) == null || (m = ws.length - 1) <= 0) |
1780 |
> |
break restart; // shutting down |
1781 |
> |
if ((v = ws[h = (j.stealHint | 1) & m]) == null || |
1782 |
> |
v.currentSteal != subtask) { |
1783 |
> |
for (int origin = h;;) { // find stealer |
1784 |
> |
if (((h = (h + 2) & m) & 15) == 1 && |
1785 |
> |
(subtask.status < 0 || j.currentJoin != subtask)) |
1786 |
> |
continue restart; // occasional staleness check |
1787 |
> |
if ((v = ws[h]) != null && |
1788 |
> |
v.currentSteal == subtask) { |
1789 |
> |
j.stealHint = h; // save hint |
1790 |
> |
break; |
1791 |
> |
} |
1792 |
> |
if (h == origin) |
1793 |
> |
break restart; // cannot find stealer |
1794 |
|
} |
1795 |
|
} |
1796 |
< |
if (stealer == null) |
1797 |
< |
break; |
1798 |
< |
} |
1799 |
< |
|
1800 |
< |
for (WorkQueue q = stealer;;) { // try to help stealer |
1801 |
< |
ForkJoinTask[] a; ForkJoinTask<?> t; int b; |
1802 |
< |
if (task.status < 0) |
1803 |
< |
break outer; |
1804 |
< |
if ((b = q.base) - q.top < 0 && (a = q.array) != null) { |
1805 |
< |
progress = true; |
1806 |
< |
int i = (((a.length - 1) & b) << ASHIFT) + ABASE; |
1807 |
< |
t = (ForkJoinTask<?>)U.getObjectVolatile(a, i); |
1808 |
< |
if (subtask.status < 0) // must recheck before taking |
1809 |
< |
break outer; |
1810 |
< |
if (t != null && |
1811 |
< |
q.base == b && |
1812 |
< |
U.compareAndSwapObject(a, i, t, null)) { |
1813 |
< |
q.base = b + 1; |
1814 |
< |
joiner.runSubtask(t); |
1796 |
> |
for (;;) { // help stealer or descend to its stealer |
1797 |
> |
ForkJoinTask[] a; int b; |
1798 |
> |
if (subtask.status < 0) // surround probes with |
1799 |
> |
continue restart; // consistency checks |
1800 |
> |
if ((b = v.base) - v.top < 0 && (a = v.array) != null) { |
1801 |
> |
int i = (((a.length - 1) & b) << ASHIFT) + ABASE; |
1802 |
> |
ForkJoinTask<?> t = |
1803 |
> |
(ForkJoinTask<?>)U.getObjectVolatile(a, i); |
1804 |
> |
if (subtask.status < 0 || j.currentJoin != subtask || |
1805 |
> |
v.currentSteal != subtask) |
1806 |
> |
continue restart; // stale |
1807 |
> |
stat = 1; // apparent progress |
1808 |
> |
if (t != null && v.base == b && |
1809 |
> |
U.compareAndSwapObject(a, i, t, null)) { |
1810 |
> |
v.base = b + 1; // help stealer |
1811 |
> |
joiner.runSubtask(t); |
1812 |
> |
} |
1813 |
> |
else if (v.base == b && ++steps == MAX_HELP) |
1814 |
> |
break restart; // v apparently stalled |
1815 |
> |
} |
1816 |
> |
else { // empty -- try to descend |
1817 |
> |
ForkJoinTask<?> next = v.currentJoin; |
1818 |
> |
if (subtask.status < 0 || j.currentJoin != subtask || |
1819 |
> |
v.currentSteal != subtask) |
1820 |
> |
continue restart; // stale |
1821 |
> |
else if (next == null || ++steps == MAX_HELP) |
1822 |
> |
break restart; // dead-end or maybe cyclic |
1823 |
> |
else { |
1824 |
> |
subtask = next; |
1825 |
> |
j = v; |
1826 |
> |
break; |
1827 |
> |
} |
1828 |
|
} |
1716 |
– |
else if (q.base == b) |
1717 |
– |
break outer; // possibly stalled |
1718 |
– |
} |
1719 |
– |
else { // descend |
1720 |
– |
ForkJoinTask<?> next = stealer.currentJoin; |
1721 |
– |
if (--depth <= 0 || subtask.status < 0 || |
1722 |
– |
next == null || next == subtask) |
1723 |
– |
break outer; // stale, dead-end, or cyclic |
1724 |
– |
subtask = next; |
1725 |
– |
j = stealer; |
1726 |
– |
break; |
1829 |
|
} |
1830 |
|
} |
1831 |
|
} |
1832 |
|
} |
1833 |
< |
return progress; |
1833 |
> |
return stat; |
1834 |
|
} |
1835 |
|
|
1836 |
|
/** |
1859 |
|
* adds a new thread if no idle workers are available and either |
1860 |
|
* pool would become completely starved or: (at least half |
1861 |
|
* starved, and fewer than 50% spares exist, and there is at least |
1862 |
< |
* one task apparently available). Even though the availablity |
1862 |
> |
* one task apparently available). Even though the availability |
1863 |
|
* check requires a full scan, it is worthwhile in reducing false |
1864 |
|
* alarms. |
1865 |
|
* |
1866 |
< |
* @param task if nonnull, a task being waited for |
1867 |
< |
* @param blocker if nonnull, a blocker being waited for |
1866 |
> |
* @param task if non-null, a task being waited for |
1867 |
> |
* @param blocker if non-null, a blocker being waited for |
1868 |
|
* @return true if the caller can block, else should recheck and retry |
1869 |
|
*/ |
1870 |
|
final boolean tryCompensate(ForkJoinTask<?> task, ManagedBlocker blocker) { |
1923 |
|
} |
1924 |
|
|
1925 |
|
/** |
1926 |
< |
* Helps and/or blocks until the given task is done |
1926 |
> |
* Helps and/or blocks until the given task is done. |
1927 |
|
* |
1928 |
|
* @param joiner the joining worker |
1929 |
|
* @param task the task |
1930 |
|
* @return task status on exit |
1931 |
|
*/ |
1932 |
|
final int awaitJoin(WorkQueue joiner, ForkJoinTask<?> task) { |
1933 |
< |
ForkJoinTask<?> prevJoin = joiner.currentJoin; |
1934 |
< |
joiner.currentJoin = task; |
1935 |
< |
long startTime = 0L; |
1936 |
< |
for (int k = 0, s; ; ++k) { |
1937 |
< |
if ((joiner.isEmpty() ? // try to help |
1938 |
< |
!tryHelpStealer(joiner, task) : |
1939 |
< |
!joiner.tryRemoveAndExec(task))) { |
1940 |
< |
if (k == 0) { |
1941 |
< |
startTime = System.nanoTime(); |
1942 |
< |
tryPollForAndExec(joiner, task); // check uncommon case |
1943 |
< |
} |
1944 |
< |
else if ((k & (MAX_HELP - 1)) == 0 && |
1945 |
< |
System.nanoTime() - startTime >= COMPENSATION_DELAY && |
1946 |
< |
tryCompensate(task, null)) { |
1947 |
< |
if (task.trySetSignal() && task.status >= 0) { |
1948 |
< |
synchronized (task) { |
1949 |
< |
if (task.status >= 0) { |
1950 |
< |
try { // see ForkJoinTask |
1951 |
< |
task.wait(); // for explanation |
1952 |
< |
} catch (InterruptedException ie) { |
1933 |
> |
int s; |
1934 |
> |
if ((s = task.status) >= 0) { |
1935 |
> |
ForkJoinTask<?> prevJoin = joiner.currentJoin; |
1936 |
> |
joiner.currentJoin = task; |
1937 |
> |
long startTime = 0L; |
1938 |
> |
for (int k = 0;;) { |
1939 |
> |
if ((s = (joiner.isEmpty() ? // try to help |
1940 |
> |
tryHelpStealer(joiner, task) : |
1941 |
> |
joiner.tryRemoveAndExec(task))) == 0 && |
1942 |
> |
(s = task.status) >= 0) { |
1943 |
> |
if (k == 0) { |
1944 |
> |
startTime = System.nanoTime(); |
1945 |
> |
tryPollForAndExec(joiner, task); // check uncommon case |
1946 |
> |
} |
1947 |
> |
else if ((k & (MAX_HELP - 1)) == 0 && |
1948 |
> |
System.nanoTime() - startTime >= |
1949 |
> |
COMPENSATION_DELAY && |
1950 |
> |
tryCompensate(task, null)) { |
1951 |
> |
if (task.trySetSignal()) { |
1952 |
> |
synchronized (task) { |
1953 |
> |
if (task.status >= 0) { |
1954 |
> |
try { // see ForkJoinTask |
1955 |
> |
task.wait(); // for explanation |
1956 |
> |
} catch (InterruptedException ie) { |
1957 |
> |
} |
1958 |
|
} |
1959 |
+ |
else |
1960 |
+ |
task.notifyAll(); |
1961 |
|
} |
1853 |
– |
else |
1854 |
– |
task.notifyAll(); |
1962 |
|
} |
1963 |
+ |
long c; // re-activate |
1964 |
+ |
do {} while (!U.compareAndSwapLong |
1965 |
+ |
(this, CTL, c = ctl, c + AC_UNIT)); |
1966 |
|
} |
1857 |
– |
long c; // re-activate |
1858 |
– |
do {} while (!U.compareAndSwapLong |
1859 |
– |
(this, CTL, c = ctl, c + AC_UNIT)); |
1967 |
|
} |
1968 |
+ |
if (s < 0 || (s = task.status) < 0) { |
1969 |
+ |
joiner.currentJoin = prevJoin; |
1970 |
+ |
break; |
1971 |
+ |
} |
1972 |
+ |
else if ((k++ & (MAX_HELP - 1)) == MAX_HELP >>> 1) |
1973 |
+ |
Thread.yield(); // for politeness |
1974 |
|
} |
1862 |
– |
if ((s = task.status) < 0) { |
1863 |
– |
joiner.currentJoin = prevJoin; |
1864 |
– |
return s; |
1865 |
– |
} |
1866 |
– |
else if ((k & (MAX_HELP - 1)) == MAX_HELP >>> 1) |
1867 |
– |
Thread.yield(); // for politeness |
1975 |
|
} |
1976 |
+ |
return s; |
1977 |
|
} |
1978 |
|
|
1979 |
|
/** |
1990 |
|
while ((s = task.status) >= 0 && |
1991 |
|
(joiner.isEmpty() ? |
1992 |
|
tryHelpStealer(joiner, task) : |
1993 |
< |
joiner.tryRemoveAndExec(task))) |
1993 |
> |
joiner.tryRemoveAndExec(task)) != 0) |
1994 |
|
; |
1995 |
|
return s; |
1996 |
|
} |
2030 |
|
*/ |
2031 |
|
final void helpQuiescePool(WorkQueue w) { |
2032 |
|
for (boolean active = true;;) { |
2033 |
< |
if (w.base - w.top < 0) |
2034 |
< |
w.runLocalTasks(); // exhaust local queue |
2033 |
> |
ForkJoinTask<?> localTask; // exhaust local queue |
2034 |
> |
while ((localTask = w.nextLocalTask()) != null) |
2035 |
> |
localTask.doExec(); |
2036 |
|
WorkQueue q = findNonEmptyStealQueue(w); |
2037 |
|
if (q != null) { |
2038 |
|
ForkJoinTask<?> t; int b; |
2064 |
|
} |
2065 |
|
|
2066 |
|
/** |
2067 |
+ |
* Restricted version of helpQuiescePool for non-FJ callers |
2068 |
+ |
*/ |
2069 |
+ |
static void externalHelpQuiescePool() { |
2070 |
+ |
ForkJoinPool p; WorkQueue[] ws; WorkQueue w, q; |
2071 |
+ |
ForkJoinTask<?> t; int b; |
2072 |
+ |
int k = submitters.get().seed & SQMASK; |
2073 |
+ |
if ((p = commonPool) != null && |
2074 |
+ |
(ws = p.workQueues) != null && |
2075 |
+ |
ws.length > (k &= p.submitMask) && |
2076 |
+ |
(w = ws[k]) != null && |
2077 |
+ |
(q = p.findNonEmptyStealQueue(w)) != null && |
2078 |
+ |
(b = q.base) - q.top < 0 && |
2079 |
+ |
(t = q.pollAt(b)) != null) |
2080 |
+ |
t.doExec(); |
2081 |
+ |
} |
2082 |
+ |
|
2083 |
+ |
/** |
2084 |
|
* Gets and removes a local or stolen task for the given worker. |
2085 |
|
* |
2086 |
|
* @return a task, if available |
2113 |
|
8); |
2114 |
|
} |
2115 |
|
|
2116 |
+ |
/** |
2117 |
+ |
* Returns approximate submission queue length for the given caller |
2118 |
+ |
*/ |
2119 |
+ |
static int getEstimatedSubmitterQueueLength() { |
2120 |
+ |
ForkJoinPool p; WorkQueue[] ws; WorkQueue q; |
2121 |
+ |
int k = submitters.get().seed & SQMASK; |
2122 |
+ |
return ((p = commonPool) != null && |
2123 |
+ |
p.runState >= 0 && |
2124 |
+ |
(ws = p.workQueues) != null && |
2125 |
+ |
ws.length > (k &= p.submitMask) && |
2126 |
+ |
(q = ws[k]) != null) ? |
2127 |
+ |
q.queueSize() : 0; |
2128 |
+ |
} |
2129 |
+ |
|
2130 |
|
// Termination |
2131 |
|
|
2132 |
|
/** |
2144 |
|
* @return true if now terminating or terminated |
2145 |
|
*/ |
2146 |
|
private boolean tryTerminate(boolean now, boolean enable) { |
2007 |
– |
Mutex lock = this.lock; |
2147 |
|
for (long c;;) { |
2148 |
|
if (((c = ctl) & STOP_BIT) != 0) { // already terminating |
2149 |
|
if ((short)(c >>> TC_SHIFT) == -parallelism) { |
2150 |
< |
lock.lock(); // don't need try/finally |
2151 |
< |
termination.signalAll(); // signal when 0 workers |
2152 |
< |
lock.unlock(); |
2150 |
> |
synchronized (this) { |
2151 |
> |
notifyAll(); // signal when 0 workers |
2152 |
> |
} |
2153 |
|
} |
2154 |
|
return true; |
2155 |
|
} |
2156 |
|
if (runState >= 0) { // not yet enabled |
2157 |
|
if (!enable) |
2158 |
|
return false; |
2159 |
< |
lock.lock(); |
2160 |
< |
runState |= SHUTDOWN; |
2161 |
< |
lock.unlock(); |
2159 |
> |
while (!U.compareAndSwapInt(this, MAINLOCK, 0, 1)) |
2160 |
> |
tryAwaitMainLock(); |
2161 |
> |
try { |
2162 |
> |
runState |= SHUTDOWN; |
2163 |
> |
} finally { |
2164 |
> |
if (!U.compareAndSwapInt(this, MAINLOCK, 1, 0)) { |
2165 |
> |
mainLock = 0; |
2166 |
> |
synchronized (this) { notifyAll(); }; |
2167 |
> |
} |
2168 |
> |
} |
2169 |
|
} |
2170 |
|
if (!now) { // check if idle & no tasks |
2171 |
|
if ((int)(c >> AC_SHIFT) != -parallelism || |
2298 |
|
// Use nearest power 2 for workQueues size. See Hackers Delight sec 3.2. |
2299 |
|
int n = parallelism - 1; |
2300 |
|
n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16; |
2301 |
< |
int size = (n + 1) << 1; // #slots = 2*#workers |
2156 |
< |
this.submitMask = size - 1; // room for max # of submit queues |
2157 |
< |
this.workQueues = new WorkQueue[size]; |
2158 |
< |
this.termination = (this.lock = new Mutex()).newCondition(); |
2159 |
< |
this.stealCount = new AtomicLong(); |
2160 |
< |
this.nextWorkerNumber = new AtomicInteger(); |
2301 |
> |
this.submitMask = ((n + 1) << 1) - 1; |
2302 |
|
int pn = poolNumberGenerator.incrementAndGet(); |
2303 |
|
StringBuilder sb = new StringBuilder("ForkJoinPool-"); |
2304 |
|
sb.append(Integer.toString(pn)); |
2305 |
|
sb.append("-worker-"); |
2306 |
|
this.workerNamePrefix = sb.toString(); |
2166 |
– |
lock.lock(); |
2307 |
|
this.runState = 1; // set init flag |
2308 |
< |
lock.unlock(); |
2308 |
> |
} |
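The nearest-power-of-two mask computed above (and again in the static initializer near the end of this diff), as a standalone sketch of the Hacker's Delight idiom (hypothetical helper; the real code inlines it):

static int submitMaskFor(int parallelism) {
    int n = parallelism - 1;
    n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
    return ((n + 1) << 1) - 1;  // e.g. parallelism 6 -> 16 slots, mask 15
}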
2309 |
> |
|
2310 |
> |
/** |
2311 |
> |
* Constructor for common pool, suitable only for static initialization. |
2312 |
> |
* Basically the same as above, but uses smallest possible initial footprint. |
2313 |
> |
*/ |
2314 |
> |
ForkJoinPool(int parallelism, int submitMask, |
2315 |
> |
ForkJoinWorkerThreadFactory factory, |
2316 |
> |
Thread.UncaughtExceptionHandler handler) { |
2317 |
> |
this.factory = factory; |
2318 |
> |
this.ueh = handler; |
2319 |
> |
this.submitMask = submitMask; |
2320 |
> |
this.parallelism = parallelism; |
2321 |
> |
long np = (long)(-parallelism); |
2322 |
> |
this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK); |
2323 |
> |
this.localMode = LIFO_QUEUE; |
2324 |
> |
this.workerNamePrefix = "ForkJoinPool.commonPool-worker-"; |
2325 |
> |
this.runState = 1; |
2326 |
> |
} |
2327 |
> |
|
2328 |
> |
/** |
2329 |
> |
* Returns the common pool instance. |
2330 |
> |
* |
2331 |
> |
* @return the common pool instance |
2332 |
> |
*/ |
2333 |
> |
public static ForkJoinPool commonPool() { |
2334 |
> |
ForkJoinPool p; |
2335 |
> |
if ((p = commonPool) == null) |
2336 |
> |
throw new Error("Common Pool Unavailable"); |
2337 |
> |
return p; |
2338 |
|
} |
2339 |
|
|
2340 |
|
// Execution methods |
2510 |
|
} |
2511 |
|
|
2512 |
|
/** |
2513 |
+ |
* Returns the targeted parallelism level of the common pool. |
2514 |
+ |
* |
2515 |
+ |
* @return the targeted parallelism level of the common pool |
2516 |
+ |
*/ |
2517 |
+ |
public static int getCommonPoolParallelism() { |
2518 |
+ |
return commonPoolParallelism; |
2519 |
+ |
} |
2520 |
+ |
|
2521 |
+ |
/** |
2522 |
|
* Returns the number of worker threads that have started but not |
2523 |
|
* yet terminated. The result returned by this method may differ |
2524 |
|
* from {@link #getParallelism} when threads are created to |
2599 |
|
* @return the number of steals |
2600 |
|
*/ |
2601 |
|
public long getStealCount() { |
2602 |
< |
long count = stealCount.get(); |
2602 |
> |
long count = stealCount; |
2603 |
|
WorkQueue[] ws; WorkQueue w; |
2604 |
|
if ((ws = workQueues) != null) { |
2605 |
|
for (int i = 1; i < ws.length; i += 2) { |
2729 |
|
public String toString() { |
2730 |
|
// Use a single pass through workQueues to collect counts |
2731 |
|
long qt = 0L, qs = 0L; int rc = 0; |
2732 |
< |
long st = stealCount.get(); |
2732 |
> |
long st = stealCount; |
2733 |
|
long c = ctl; |
2734 |
|
WorkQueue[] ws; WorkQueue w; |
2735 |
|
if ((ws = workQueues) != null) { |
2770 |
|
} |
2771 |
|
|
2772 |
|
/** |
2773 |
< |
* Initiates an orderly shutdown in which previously submitted |
2774 |
< |
* tasks are executed, but no new tasks will be accepted. |
2775 |
< |
* Invocation has no additional effect if already shut down. |
2776 |
< |
* Tasks that are in the process of being submitted concurrently |
2777 |
< |
* during the course of this method may or may not be rejected. |
2773 |
> |
* Possibly initiates an orderly shutdown in which previously |
2774 |
> |
* submitted tasks are executed, but no new tasks will be |
2775 |
> |
* accepted. Invocation has no effect on execution state if this |
2776 |
> |
* is the {@link #commonPool}, and no additional effect if |
2777 |
> |
* already shut down. Tasks that are in the process of being |
2778 |
> |
* submitted concurrently during the course of this method may or |
2779 |
> |
* may not be rejected. |
2780 |
|
* |
2781 |
|
* @throws SecurityException if a security manager exists and |
2782 |
|
* the caller is not permitted to modify threads |
2785 |
|
*/ |
2786 |
|
public void shutdown() { |
2787 |
|
checkPermission(); |
2788 |
< |
tryTerminate(false, true); |
2788 |
> |
if (this != commonPool) |
2789 |
> |
tryTerminate(false, true); |
2790 |
|
} |
2791 |
|
|
2792 |
|
/** |
2793 |
< |
* Attempts to cancel and/or stop all tasks, and reject all |
2794 |
< |
* subsequently submitted tasks. Tasks that are in the process of |
2795 |
< |
* being submitted or executed concurrently during the course of |
2796 |
< |
* this method may or may not be rejected. This method cancels |
2797 |
< |
* both existing and unexecuted tasks, in order to permit |
2798 |
< |
* termination in the presence of task dependencies. So the method |
2799 |
< |
* always returns an empty list (unlike the case for some other |
2800 |
< |
* Executors). |
2793 |
> |
* Possibly attempts to cancel and/or stop all tasks, and reject |
2794 |
> |
* all subsequently submitted tasks. Invocation has no effect on |
2795 |
> |
* execution state if this is the {@link #commonPool}, and no |
2796 |
> |
* additional effect if already shut down. Otherwise, tasks that |
2797 |
> |
* are in the process of being submitted or executed concurrently |
2798 |
> |
* during the course of this method may or may not be |
2799 |
> |
* rejected. This method cancels both existing and unexecuted |
2800 |
> |
* tasks, in order to permit termination in the presence of task |
2801 |
> |
* dependencies. So the method always returns an empty list |
2802 |
> |
* (unlike the case for some other Executors). |
2803 |
|
* |
2804 |
|
* @return an empty list |
2805 |
|
* @throws SecurityException if a security manager exists and |
2809 |
|
*/ |
2810 |
|
public List<Runnable> shutdownNow() { |
2811 |
|
checkPermission(); |
2812 |
< |
tryTerminate(true, true); |
2812 |
> |
if (this != commonPool) |
2813 |
> |
tryTerminate(true, true); |
2814 |
|
return Collections.emptyList(); |
2815 |
|
} |
2816 |
|
|
2867 |
|
public boolean awaitTermination(long timeout, TimeUnit unit) |
2868 |
|
throws InterruptedException { |
2869 |
|
long nanos = unit.toNanos(timeout); |
2870 |
< |
final Mutex lock = this.lock; |
2871 |
< |
lock.lock(); |
2872 |
< |
try { |
2873 |
< |
for (;;) { |
2874 |
< |
if (isTerminated()) |
2875 |
< |
return true; |
2876 |
< |
if (nanos <= 0) |
2877 |
< |
return false; |
2878 |
< |
nanos = termination.awaitNanos(nanos); |
2870 |
> |
if (isTerminated()) |
2871 |
> |
return true; |
2872 |
> |
long startTime = System.nanoTime(); |
2873 |
> |
boolean terminated = false; |
2874 |
> |
synchronized (this) { |
2875 |
> |
for (long waitTime = nanos, millis = 0L;;) { |
2876 |
> |
if ((terminated = isTerminated()) || |
2877 |
> |
waitTime <= 0L || |
2878 |
> |
(millis = unit.toMillis(waitTime)) <= 0L) |
2879 |
> |
break; |
2880 |
> |
wait(millis); |
2881 |
> |
waitTime = nanos - (System.nanoTime() - startTime); |
2882 |
|
} |
2696 |
– |
} finally { |
2697 |
– |
lock.unlock(); |
2883 |
|
} |
2884 |
+ |
return terminated; |
2885 |
|
} |
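Typical caller-side use of the lifecycle methods above, as a sketch (for a private pool; per this revision, shutdown and shutdownNow are no-ops on the common pool; assumes java.util.concurrent.TimeUnit is imported):

pool.shutdown();                                   // stop accepting new tasks
if (!pool.awaitTermination(5, TimeUnit.SECONDS))   // bounded wait
    pool.shutdownNow();                            // then cancel stragglers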
2886 |
|
|
2887 |
|
/** |
3013 |
|
private static final long PARKBLOCKER; |
3014 |
|
private static final int ABASE; |
3015 |
|
private static final int ASHIFT; |
3016 |
+ |
private static final long NEXTWORKERNUMBER; |
3017 |
+ |
private static final long STEALCOUNT; |
3018 |
+ |
private static final long MAINLOCK; |
3019 |
|
|
3020 |
|
static { |
3021 |
|
poolNumberGenerator = new AtomicInteger(); |
3031 |
|
Class<?> ak = ForkJoinTask[].class; |
3032 |
|
CTL = U.objectFieldOffset |
3033 |
|
(k.getDeclaredField("ctl")); |
3034 |
+ |
NEXTWORKERNUMBER = U.objectFieldOffset |
3035 |
+ |
(k.getDeclaredField("nextWorkerNumber")); |
3036 |
+ |
STEALCOUNT = U.objectFieldOffset |
3037 |
+ |
(k.getDeclaredField("stealCount")); |
3038 |
+ |
MAINLOCK = U.objectFieldOffset |
3039 |
+ |
(k.getDeclaredField("mainLock")); |
3040 |
|
Class<?> tk = Thread.class; |
3041 |
|
PARKBLOCKER = U.objectFieldOffset |
3042 |
|
(tk.getDeclaredField("parkBlocker")); |
3043 |
|
ABASE = U.arrayBaseOffset(ak); |
3044 |
|
s = U.arrayIndexScale(ak); |
3045 |
+ |
ASHIFT = 31 - Integer.numberOfLeadingZeros(s); |
3046 |
|
} catch (Exception e) { |
3047 |
|
throw new Error(e); |
3048 |
|
} |
3049 |
|
if ((s & (s-1)) != 0) |
3050 |
|
throw new Error("data type scale not a power of two"); |
3051 |
< |
ASHIFT = 31 - Integer.numberOfLeadingZeros(s); |
3051 |
> |
try { // Establish common pool |
3052 |
> |
String pp = System.getProperty(propPrefix + "parallelism"); |
3053 |
> |
String fp = System.getProperty(propPrefix + "threadFactory"); |
3054 |
> |
String up = System.getProperty(propPrefix + "exceptionHandler"); |
3055 |
> |
ForkJoinWorkerThreadFactory fac = (fp == null) ? |
3056 |
> |
defaultForkJoinWorkerThreadFactory : |
3057 |
> |
((ForkJoinWorkerThreadFactory)ClassLoader. |
3058 |
> |
getSystemClassLoader().loadClass(fp).newInstance()); |
3059 |
> |
Thread.UncaughtExceptionHandler ueh = (up == null) ? null : |
3060 |
> |
((Thread.UncaughtExceptionHandler)ClassLoader. |
3061 |
> |
getSystemClassLoader().loadClass(up).newInstance()); |
3062 |
> |
int par; |
3063 |
> |
if ((pp == null || (par = Integer.parseInt(pp)) <= 0)) |
3064 |
> |
par = Runtime.getRuntime().availableProcessors(); |
3065 |
> |
if (par > MAX_CAP) |
3066 |
> |
par = MAX_CAP; |
3067 |
> |
commonPoolParallelism = par; |
3068 |
> |
int n = par - 1; // precompute submit mask |
3069 |
> |
n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; |
3070 |
> |
n |= n >>> 8; n |= n >>> 16; |
3071 |
> |
int mask = ((n + 1) << 1) - 1; |
3072 |
> |
commonPool = new ForkJoinPool(par, mask, fac, ueh); |
3073 |
> |
} catch (Exception e) { |
3074 |
> |
throw new Error(e); |
3075 |
> |
} |
3076 |
|
} |
3077 |
|
|
3078 |
|
/** |