27 |
|
* (eventually blocking if none exist). This makes them efficient when |
28 |
|
* most tasks spawn other subtasks (as do most ForkJoinTasks), as well |
29 |
|
* as the mixed execution of some plain Runnable- or Callable- based |
30 |
< |
* activities along with ForkJoinTasks. Otherwise, other |
30 |
> |
* activities along with ForkJoinTasks. When setting |
31 |
> |
* <tt>setAsyncMode</tt>, a ForkJoinPool may also be appropriate for |
32 |
> |
* use with fine-grained tasks that are never joined. Otherwise, other |
33 |
|
* ExecutorService implementations are typically more appropriate |
34 |
|
* choices. |
35 |
|
* |
41 |
|
* nested <code>ManagedBlocker</code> interface enables extension of |
42 |
|
* the kinds of synchronization accommodated. The target parallelism |
43 |
|
* level may also be changed dynamically (<code>setParallelism</code>) |
44 |
< |
* and dynamically thread construction can be limited using methods |
44 |
> |
* and thread construction can be limited using methods |
45 |
|
* <code>setMaximumPoolSize</code> and/or |
46 |
|
* <code>setMaintainsParallelism</code>. |
47 |
|
* |
133 |
|
new AtomicInteger(); |
134 |
|
|
135 |
|
/** |
136 |
< |
* Array holding all worker threads in the pool. Array size must |
137 |
< |
* be a power of two. Updates and replacements are protected by |
138 |
< |
* workerLock, but it is always kept in a consistent enough state |
139 |
< |
* to be randomly accessed without locking by workers performing |
140 |
< |
* work-stealing. |
136 |
> |
* Array holding all worker threads in the pool. Initialized upon |
137 |
> |
* first use. Array size must be a power of two. Updates and |
138 |
> |
* replacements are protected by workerLock, but it is always kept |
139 |
> |
* in a consistent enough state to be randomly accessed without |
140 |
> |
* locking by workers performing work-stealing. |
141 |
|
*/ |
142 |
|
volatile ForkJoinWorkerThread[] workers; |
143 |
|
|
206 |
|
private volatile int parallelism; |
207 |
|
|
208 |
|
/** |
209 |
+ |
* True if using local FIFO, rather than the default LIFO, for local polling |
210 |
+ |
*/ |
211 |
+ |
private volatile boolean locallyFifo; |
212 |
+ |
|
213 |
+ |
/** |
214 |
|
* Holds number of total (i.e., created and not yet terminated) |
215 |
|
* and running (i.e., not blocked on joins or other managed sync) |
216 |
|
* threads, packed into one int to ensure consistent snapshot when |
398 |
|
this.termination = workerLock.newCondition(); |
399 |
|
this.stealCount = new AtomicLong(); |
400 |
|
this.submissionQueue = new LinkedTransferQueue<ForkJoinTask<?>>(); |
401 |
< |
createAndStartInitialWorkers(parallelism); |
401 |
> |
// worker array and workers are lazily constructed |
402 |
|
} |
403 |
|
|
404 |
|
/** |
412 |
|
if (w != null) { |
413 |
|
w.poolIndex = index; |
414 |
|
w.setDaemon(true); |
415 |
+ |
w.setAsyncMode(locallyFifo); |
416 |
|
w.setName("ForkJoinPool-" + poolNumber + "-worker-" + index); |
417 |
|
if (h != null) |
418 |
|
w.setUncaughtExceptionHandler(h); |
429 |
|
} |
430 |
|
|
431 |
|
/** |
432 |
< |
* Create or resize array if necessary to hold newLength |
432 |
> |
* Create or resize array if necessary to hold newLength. |
433 |
> |
* Call only under exclusion or lock |
434 |
|
* @return the array |
435 |
|
*/ |
436 |
|
private ForkJoinWorkerThread[] ensureWorkerArrayCapacity(int newLength) { |
448 |
|
*/ |
449 |
|
private void tryShrinkWorkerArray() { |
450 |
|
ForkJoinWorkerThread[] ws = workers; |
451 |
< |
int len = ws.length; |
452 |
< |
int last = len - 1; |
453 |
< |
while (last >= 0 && ws[last] == null) |
454 |
< |
--last; |
455 |
< |
int newLength = arraySizeFor(last+1); |
456 |
< |
if (newLength < len) |
457 |
< |
workers = Arrays.copyOf(ws, newLength); |
451 |
> |
if (ws != null) { |
452 |
> |
int len = ws.length; |
453 |
> |
int last = len - 1; |
454 |
> |
while (last >= 0 && ws[last] == null) |
455 |
> |
--last; |
456 |
> |
int newLength = arraySizeFor(last+1); |
457 |
> |
if (newLength < len) |
458 |
> |
workers = Arrays.copyOf(ws, newLength); |
459 |
> |
} |
460 |
|
} |
461 |
|
|
462 |
|
/** |
463 |
< |
* Initial worker array and worker creation and startup. (This |
453 |
< |
* must be done under lock to avoid interference by some of the |
454 |
< |
* newly started threads while creating others.) |
463 |
> |
* Initialize workers if necessary |
464 |
|
*/ |
465 |
< |
private void createAndStartInitialWorkers(int ps) { |
466 |
< |
final ReentrantLock lock = this.workerLock; |
467 |
< |
lock.lock(); |
468 |
< |
try { |
469 |
< |
ForkJoinWorkerThread[] ws = ensureWorkerArrayCapacity(ps); |
470 |
< |
for (int i = 0; i < ps; ++i) { |
471 |
< |
ForkJoinWorkerThread w = createWorker(i); |
472 |
< |
if (w != null) { |
473 |
< |
ws[i] = w; |
474 |
< |
w.start(); |
475 |
< |
updateWorkerCount(1); |
465 |
> |
final void ensureWorkerInitialization() { |
466 |
> |
ForkJoinWorkerThread[] ws = workers; |
467 |
> |
if (ws == null) { |
468 |
> |
final ReentrantLock lock = this.workerLock; |
469 |
> |
lock.lock(); |
470 |
> |
try { |
471 |
> |
ws = workers; |
472 |
> |
if (ws == null) { |
473 |
> |
int ps = parallelism; |
474 |
> |
ws = ensureWorkerArrayCapacity(ps); |
475 |
> |
for (int i = 0; i < ps; ++i) { |
476 |
> |
ForkJoinWorkerThread w = createWorker(i); |
477 |
> |
if (w != null) { |
478 |
> |
ws[i] = w; |
479 |
> |
w.start(); |
480 |
> |
updateWorkerCount(1); |
481 |
> |
} |
482 |
> |
} |
483 |
|
} |
484 |
+ |
} finally { |
485 |
+ |
lock.unlock(); |
486 |
|
} |
469 |
– |
} finally { |
470 |
– |
lock.unlock(); |
487 |
|
} |
488 |
|
} |
489 |
|
|
529 |
|
private <T> void doSubmit(ForkJoinTask<T> task) { |
530 |
|
if (isShutdown()) |
531 |
|
throw new RejectedExecutionException(); |
532 |
+ |
if (workers == null) |
533 |
+ |
ensureWorkerInitialization(); |
534 |
|
submissionQueue.offer(task); |
535 |
|
signalIdleWorkers(); |
536 |
|
} |
701 |
|
old = ueh; |
702 |
|
ueh = h; |
703 |
|
ForkJoinWorkerThread[] ws = workers; |
704 |
< |
for (int i = 0; i < ws.length; ++i) { |
705 |
< |
ForkJoinWorkerThread w = ws[i]; |
706 |
< |
if (w != null) |
707 |
< |
w.setUncaughtExceptionHandler(h); |
704 |
> |
if (ws != null) { |
705 |
> |
for (int i = 0; i < ws.length; ++i) { |
706 |
> |
ForkJoinWorkerThread w = ws[i]; |
707 |
> |
if (w != null) |
708 |
> |
w.setUncaughtExceptionHandler(h); |
709 |
> |
} |
710 |
|
} |
711 |
|
} finally { |
712 |
|
lock.unlock(); |
813 |
|
} |
814 |
|
|
815 |
|
/** |
816 |
+ |
* Establishes local first-in-first-out scheduling mode for forked |
817 |
+ |
* tasks that are never joined. This mode may be more appropriate |
818 |
+ |
* than default locally stack-based mode in applications in which |
819 |
+ |
* worker threads only process asynchronous tasks. This method is |
820 |
+ |
* designed to be invoked only when pool is quiescent, and |
821 |
+ |
* typically only before any tasks are submitted. The effects of |
822 |
+ |
* invocations at other times may be unpredictable. |
823 |
+ |
* |
824 |
+ |
* @param async if true, use locally FIFO scheduling |
825 |
+ |
* @return the previous mode. |
826 |
+ |
*/ |
827 |
+ |
public boolean setAsyncMode(boolean async) { |
828 |
+ |
boolean oldMode = locallyFifo; |
829 |
+ |
locallyFifo = async; |
830 |
+ |
ForkJoinWorkerThread[] ws = workers; |
831 |
+ |
if (ws != null) { |
832 |
+ |
for (int i = 0; i < ws.length; ++i) { |
833 |
+ |
ForkJoinWorkerThread t = ws[i]; |
834 |
+ |
if (t != null) |
835 |
+ |
t.setAsyncMode(async); |
836 |
+ |
} |
837 |
+ |
} |
838 |
+ |
return oldMode; |
839 |
+ |
} |
840 |
+ |
|
841 |
+ |
/** |
842 |
+ |
* Returns true if this pool uses local first-in-first-out |
843 |
+ |
* scheduling mode for forked tasks that are never joined. |
844 |
+ |
* |
845 |
+ |
* @return true if this pool uses async mode. |
846 |
+ |
*/ |
847 |
+ |
public boolean getAsyncMode() { |
848 |
+ |
return locallyFifo; |
849 |
+ |
} |
850 |
+ |
|
851 |
+ |
/** |
852 |
|
* Returns an estimate of the number of worker threads that are |
853 |
|
* not blocked waiting to join tasks or for other managed |
854 |
|
* synchronization. |
930 |
|
public long getQueuedTaskCount() { |
931 |
|
long count = 0; |
932 |
|
ForkJoinWorkerThread[] ws = workers; |
933 |
< |
for (int i = 0; i < ws.length; ++i) { |
934 |
< |
ForkJoinWorkerThread t = ws[i]; |
935 |
< |
if (t != null) |
936 |
< |
count += t.getQueueSize(); |
933 |
> |
if (ws != null) { |
934 |
> |
for (int i = 0; i < ws.length; ++i) { |
935 |
> |
ForkJoinWorkerThread t = ws[i]; |
936 |
> |
if (t != null) |
937 |
> |
count += t.getQueueSize(); |
938 |
> |
} |
939 |
|
} |
940 |
|
return count; |
941 |
|
} |
970 |
|
} |
971 |
|
|
972 |
|
/** |
973 |
+ |
* Removes all available unexecuted submitted and forked tasks |
974 |
+ |
* from scheduling queues and adds them to the given collection, |
975 |
+ |
* without altering their execution status. These may include |
976 |
+ |
* artificially generated or wrapped tasks. This method is designed |
977 |
+ |
* to be invoked only when the pool is known to be |
978 |
+ |
* quiescent. Invocations at other times may not remove all |
979 |
+ |
* tasks. A failure encountered while attempting to add elements |
980 |
+ |
* to collection <tt>c</tt> may result in elements being in |
981 |
+ |
* neither, either or both collections when the associated |
982 |
+ |
* exception is thrown. The behavior of this operation is |
983 |
+ |
* undefined if the specified collection is modified while the |
984 |
+ |
* operation is in progress. |
985 |
+ |
* @param c the collection to transfer elements into |
986 |
+ |
* @return the number of elements transferred |
987 |
+ |
*/ |
988 |
+ |
protected int drainTasksTo(Collection<ForkJoinTask<?>> c) { |
989 |
+ |
int n = submissionQueue.drainTo(c); |
990 |
+ |
ForkJoinWorkerThread[] ws = workers; |
991 |
+ |
if (ws != null) { |
992 |
+ |
for (int i = 0; i < ws.length; ++i) { |
993 |
+ |
ForkJoinWorkerThread w = ws[i]; |
994 |
+ |
if (w != null) |
995 |
+ |
n += w.drainTasksTo(c); |
996 |
+ |
} |
997 |
+ |
} |
998 |
+ |
return n; |
999 |
+ |
} |
1000 |
+ |
|
1001 |
+ |
/** |
1002 |
|
* Returns a string identifying this pool, as well as its state, |
1003 |
|
* including indications of run state, parallelism level, and |
1004 |
|
* worker and task counts. |
1059 |
|
* waiting tasks. Tasks that are in the process of being |
1060 |
|
* submitted or executed concurrently during the course of this |
1061 |
|
* method may or may not be rejected. Unlike some other executors, |
1062 |
< |
* this method cancels rather than collects non-executed tasks, |
1063 |
< |
* so always returns an empty list. |
1062 |
> |
* this method cancels rather than collects non-executed tasks |
1063 |
> |
* upon termination, so always returns an empty list. However, you |
1064 |
> |
* can use method <code>drainTasksTo</code> before invoking this |
1065 |
> |
* method to transfer unexecuted tasks to another collection. |
1066 |
|
* @return an empty list |
1067 |
|
* @throws SecurityException if a security manager exists and |
1068 |
|
* the caller is not permitted to modify threads |
1147 |
|
lock.lock(); |
1148 |
|
try { |
1149 |
|
ForkJoinWorkerThread[] ws = workers; |
1150 |
< |
int idx = w.poolIndex; |
1151 |
< |
if (idx >= 0 && idx < ws.length && ws[idx] == w) |
1152 |
< |
ws[idx] = null; |
1153 |
< |
if (totalCountOf(workerCounts) == 0) { |
1154 |
< |
terminate(); // no-op if already terminating |
1155 |
< |
transitionRunStateTo(TERMINATED); |
1156 |
< |
termination.signalAll(); |
1157 |
< |
} |
1158 |
< |
else if (!isTerminating()) { |
1159 |
< |
tryShrinkWorkerArray(); |
1160 |
< |
tryResumeSpare(true); // allow replacement |
1150 |
> |
if (ws != null) { |
1151 |
> |
int idx = w.poolIndex; |
1152 |
> |
if (idx >= 0 && idx < ws.length && ws[idx] == w) |
1153 |
> |
ws[idx] = null; |
1154 |
> |
if (totalCountOf(workerCounts) == 0) { |
1155 |
> |
terminate(); // no-op if already terminating |
1156 |
> |
transitionRunStateTo(TERMINATED); |
1157 |
> |
termination.signalAll(); |
1158 |
> |
} |
1159 |
> |
else if (!isTerminating()) { |
1160 |
> |
tryShrinkWorkerArray(); |
1161 |
> |
tryResumeSpare(true); // allow replacement |
1162 |
> |
} |
1163 |
|
} |
1164 |
|
} finally { |
1165 |
|
lock.unlock(); |
1207 |
|
lock.lock(); |
1208 |
|
try { |
1209 |
|
ForkJoinWorkerThread[] ws = workers; |
1210 |
< |
for (int i = 0; i < ws.length; ++i) { |
1211 |
< |
ForkJoinWorkerThread t = ws[i]; |
1212 |
< |
if (t != null) |
1213 |
< |
t.cancelTasks(); |
1210 |
> |
if (ws != null) { |
1211 |
> |
for (int i = 0; i < ws.length; ++i) { |
1212 |
> |
ForkJoinWorkerThread t = ws[i]; |
1213 |
> |
if (t != null) |
1214 |
> |
t.cancelTasks(); |
1215 |
> |
} |
1216 |
|
} |
1217 |
|
} finally { |
1218 |
|
lock.unlock(); |
1228 |
|
lock.lock(); |
1229 |
|
try { |
1230 |
|
ForkJoinWorkerThread[] ws = workers; |
1231 |
< |
for (int i = 0; i < ws.length; ++i) { |
1232 |
< |
ForkJoinWorkerThread t = ws[i]; |
1233 |
< |
if (t != null) |
1234 |
< |
t.shutdownNow(); |
1231 |
> |
if (ws != null) { |
1232 |
> |
for (int i = 0; i < ws.length; ++i) { |
1233 |
> |
ForkJoinWorkerThread t = ws[i]; |
1234 |
> |
if (t != null) |
1235 |
> |
t.shutdownNow(); |
1236 |
> |
} |
1237 |
|
} |
1238 |
|
} finally { |
1239 |
|
lock.unlock(); |
1250 |
|
lock.lock(); |
1251 |
|
try { |
1252 |
|
ForkJoinWorkerThread[] ws = workers; |
1253 |
< |
for (int i = 0; i < ws.length; ++i) { |
1254 |
< |
ForkJoinWorkerThread t = ws[i]; |
1255 |
< |
if (t != null && !t.isTerminated()) { |
1256 |
< |
try { |
1257 |
< |
t.interrupt(); |
1258 |
< |
} catch (SecurityException ignore) { |
1253 |
> |
if (ws != null) { |
1254 |
> |
for (int i = 0; i < ws.length; ++i) { |
1255 |
> |
ForkJoinWorkerThread t = ws[i]; |
1256 |
> |
if (t != null && !t.isTerminated()) { |
1257 |
> |
try { |
1258 |
> |
t.interrupt(); |
1259 |
> |
} catch (SecurityException ignore) { |
1260 |
> |
} |
1261 |
|
} |
1262 |
|
} |
1263 |
|
} |