  13    import java.util.Collection;
  14    import java.util.Collections;
  15    import java.util.List;
  16 -  import java.util.concurrent.locks.Condition;
  16    import java.util.concurrent.locks.LockSupport;
  17    import java.util.concurrent.locks.ReentrantLock;
  18    import java.util.concurrent.atomic.AtomicInteger;
  19 <  import java.util.concurrent.atomic.AtomicLong;
  19 >  import java.util.concurrent.CountDownLatch;
  20
  21    /**
  22     * An {@link ExecutorService} for running {@link ForkJoinTask}s.
  52     * ({@link #setParallelism}). The total number of threads may be
  53     * limited using method {@link #setMaximumPoolSize}, in which case it
  54     * may become possible for the activities of a pool to stall due to
  55 <   * the lack of available threads to process new tasks.
  55 >   * the lack of available threads to process new tasks. When the pool
  56 >   * is executing tasks, these and other configuration setting methods
  57 >   * may only gradually affect actual pool sizes. It is normally best
  58 >   * practice to invoke these methods only when the pool is known to be
  59 >   * quiescent.
  60     *
  61     * <p>In addition to execution and lifecycle control methods, this
  62     * class provides status check methods (for example
  97    public class ForkJoinPool extends AbstractExecutorService {
  98
  99        /*
 100 <      * See the extended comments interspersed below for design,
 101 <      * rationale, and walkthroughs.
 100 >      * Implementation Overview
 101 >      *
 102 >      * This class provides the central bookkeeping and control for a
 103 >      * set of worker threads: Submissions from non-FJ threads enter
 104 >      * into a submission queue. Workers take these tasks and typically
 105 >      * split them into subtasks that may be stolen by other workers.
 106 >      * The main work-stealing mechanics implemented in class
 107 >      * ForkJoinWorkerThread give first priority to processing tasks
 108 >      * from their own queues (LIFO or FIFO, depending on mode), then
 109 >      * to randomized FIFO steals of tasks in other worker queues, and
 110 >      * lastly to new submissions. These mechanics do not consider
 111 >      * affinities, loads, cache localities, etc, so rarely provide the
 112 >      * best possible performance on a given machine, but portably
 113 >      * provide good throughput by averaging over these factors.
 114 >      * (Further, even if we did try to use such information, we do not
 115 >      * usually have a basis for exploiting it. For example, some sets
 116 >      * of tasks profit from cache affinities, but others are harmed by
 117 >      * cache pollution effects.)
 118 >      *
 119 >      * The main throughput advantages of work-stealing stem from
 120 >      * decentralized control -- workers mostly steal tasks from each
 121 >      * other. We do not want to negate this by creating bottlenecks
 122 >      * implementing the management responsibilities of this class. So
 123 >      * we use a collection of techniques that avoid, reduce, or cope
 124 >      * well with contention. These entail several instances of
 125 >      * bit-packing into CASable fields to maintain only the minimally
 126 >      * required atomicity. To enable such packing, we restrict maximum
 127 >      * parallelism to (1<<15)-1 (enabling twice this to fit into a 16
 128 >      * bit field), which is far in excess of normal operating range.
 129 >      * Even though updates to some of these bookkeeping fields do
 130 >      * sometimes contend with each other, they don't normally
 131 >      * cache-contend with updates to others enough to warrant memory
 132 >      * padding or isolation. So they are all held as fields of
 133 >      * ForkJoinPool objects. The main capabilities are as follows:
 134 >      *
 135 >      * 1. Creating and removing workers. Workers are recorded in the
 136 >      * "workers" array. This is an array as opposed to some other data
 137 >      * structure to support index-based random steals by workers.
 138 >      * Updates to the array recording new workers and unrecording
 139 >      * terminated ones are protected from each other by a lock
 140 >      * (workerLock) but the array is otherwise concurrently readable,
 141 >      * and accessed directly by workers. To simplify index-based
 142 >      * operations, the array size is always a power of two, and all
 143 >      * readers must tolerate null slots. Currently, all but the first
 144 >      * worker thread creation is on-demand, triggered by task
 145 >      * submissions, replacement of terminated workers, and/or
 146 >      * compensation for blocked workers. However, all other support
 147 >      * code is set up to work with other policies.
 148 >      *
 149 >      * 2. Bookkeeping for dynamically adding and removing workers. We
 150 >      * maintain a given level of parallelism (or, if
 151 >      * maintainsParallelism is false, at least avoid starvation). When
 152 >      * some workers are known to be blocked (on joins or via
 153 >      * ManagedBlocker), we may create or resume others to take their
 154 >      * place until they unblock (see below). Implementing this
 155 >      * requires counts of the number of "running" threads (i.e., those
 156 >      * that are neither blocked nor artificially suspended) as well as
 157 >      * the total number. These two values are packed into one field,
 158 >      * "workerCounts" because we need accurate snapshots when deciding
 159 >      * to create, resume or suspend. To support these decisions,
 160 >      * updates must be prospective (not retrospective). For example,
 161 >      * the running count is decremented before blocking by a thread
 162 >      * about to block, but incremented by the thread about to unblock
 163 >      * it. (In a few cases, these prospective updates may need to be
 164 >      * rolled back, for example when deciding to create a new worker
 165 >      * but the thread factory fails or returns null. In these cases,
 166 >      * we are no worse off wrt other decisions than we would be
 167 >      * otherwise.) Updates to the workerCounts field sometimes
 168 >      * transiently encounter a fair amount of contention when join
 169 >      * dependencies are such that many threads block or unblock at
 170 >      * about the same time. We alleviate this by sometimes bundling
 171 >      * updates (for example blocking one thread on join and resuming a
 172 >      * spare cancel each other out), and in most other cases
 173 >      * performing an alternative action (like releasing waiters and
 174 >      * finding spares; see below) as a more productive form of
 175 >      * backoff.
 176 >      *
 177 >      * 3. Maintaining global run state. The run state of the pool
 178 >      * consists of a runLevel (SHUTDOWN, TERMINATING, etc) similar to
 179 >      * those in other Executor implementations, as well as a count of
 180 >      * "active" workers -- those that are, or soon will be, or
 181 >      * recently were executing tasks. The runLevel and active count
 182 >      * are packed together in order to correctly trigger shutdown and
 183 >      * termination. Without care, active counts can be subject to very
 184 >      * high contention. We substantially reduce this contention by
 185 >      * relaxing update rules. A worker must claim active status
 186 >      * prospectively, by activating if it sees that a submitted or
 187 >      * stealable task exists (it may find after activating that the
 188 >      * task no longer exists). It stays active while processing this
 189 >      * task (if it exists) and any other local subtasks it produces,
 190 >      * until it cannot find any other tasks. It then tries
 191 >      * inactivating (see method preStep), but upon update contention
 192 >      * instead scans for more tasks, later retrying inactivation if it
 193 >      * doesn't find any.
 194 >      *
 195 >      * 4. Managing idle workers waiting for tasks. We cannot let
 196 >      * workers spin indefinitely scanning for tasks when none are
 197 >      * available. On the other hand, we must quickly prod them into
 198 >      * action when new tasks are submitted or generated. We
 199 >      * park/unpark these idle workers using an event-count scheme.
 200 >      * Field eventCount is incremented upon events that may enable
 201 >      * workers that previously could not find a task to now find one:
 202 >      * Submission of a new task to the pool, or another worker pushing
 203 >      * a task onto a previously empty queue. (We also use this
 204 >      * mechanism for termination and reconfiguration actions that
 205 >      * require wakeups of idle workers). Each worker maintains its
 206 >      * last known event count, and blocks when a scan for work did not
 207 >      * find a task AND its lastEventCount matches the current
 208 >      * eventCount. Waiting idle workers are recorded in a variant of
 209 >      * Treiber stack headed by field eventWaiters which, when nonzero,
 210 >      * encodes the thread index and count awaited for by the worker
 211 >      * thread most recently calling eventSync. This thread in turn has
 212 >      * a record (field nextEventWaiter) for the next waiting worker.
 213 >      * In addition to allowing simpler decisions about need for
 214 >      * wakeup, the event count bits in eventWaiters serve the role of
 215 >      * tags to avoid ABA errors in Treiber stacks. To reduce delays
 216 >      * in task diffusion, workers not otherwise occupied may invoke
 217 >      * method releaseWaiters, that removes and signals (unparks)
 218 >      * workers not waiting on current count. To minimize task
 219 >      * production stalls associated with signalling, any worker pushing
 220 >      * a task on an empty queue invokes the weaker method signalWork,
 221 >      * that only releases idle workers until it detects interference
 222 >      * by other threads trying to release, and lets them take
 223 >      * over. The net effect is a tree-like diffusion of signals, where
 224 >      * released threads (and possibly others) help with unparks. To
 225 >      * further reduce contention effects a bit, failed CASes to
 226 >      * increment field eventCount are tolerated without retries.
 227 >      * Conceptually they are merged into the same event, which is OK
 228 >      * when their only purpose is to enable workers to scan for work.
 229 >      *
 230 >      * 5. Managing suspension of extra workers. When a worker is about
 231 >      * to block waiting for a join (or via ManagedBlockers), we may
 232 >      * create a new thread to maintain parallelism level, or at least
 233 >      * avoid starvation (see below). Usually, extra threads are needed
 234 >      * for only very short periods, yet join dependencies are such
 235 >      * that we sometimes need them in bursts. Rather than create new
 236 >      * threads each time this happens, we suspend no-longer-needed
 237 >      * extra ones as "spares". For most purposes, we don't distinguish
 238 >      * "extra" spare threads from normal "core" threads: On each call
 239 >      * to preStep (the only point at which we can do this) a worker
 240 >      * checks to see if there are now too many running workers, and if
 241 >      * so, suspends itself. Methods preJoin and doBlock look for
 242 >      * suspended threads to resume before considering creating a new
 243 >      * replacement. We don't need a special data structure to maintain
 244 >      * spares; simply scanning the workers array looking for
 245 >      * worker.isSuspended() is fine because the calling thread is
 246 >      * otherwise not doing anything useful anyway; we are at least as
 247 >      * happy if after locating a spare, the caller doesn't actually
 248 >      * block because the join is ready before we try to adjust and
 249 >      * compensate. Note that this is intrinsically racy. One thread
 250 >      * may become a spare at about the same time as another is
 251 >      * needlessly being created. We counteract this and related slop
 252 >      * in part by requiring resumed spares to immediately recheck (in
 253 >      * preStep) to see whether they should re-suspend. The only
 254 >      * effective difference between "extra" and "core" threads is that
 255 >      * we allow the "extra" ones to time out and die if they are not
 256 >      * resumed within a keep-alive interval of a few seconds. This is
 257 >      * implemented mainly within ForkJoinWorkerThread, but requires
 258 >      * some coordination (isTrimmed() -- meaning killed while
 259 >      * suspended) to correctly maintain pool counts.
 260 >      *
 261 >      * 6. Deciding when to create new workers. The main dynamic
 262 >      * control in this class is deciding when to create extra threads,
 263 >      * in methods preJoin and doBlock. We always need to create one
 264 >      * when the number of running threads becomes zero. But because
 265 >      * blocked joins are typically dependent, we don't necessarily
 266 >      * need or want one-to-one replacement. Using a one-to-one
 267 >      * compensation rule often leads to enough useless overhead
 268 >      * creating, suspending, resuming, and/or killing threads to
 269 >      * significantly degrade throughput. We use a rule reflecting the
 270 >      * idea that, the more spare threads you already have, the more
 271 >      * evidence you need to create another one; where "evidence" is
 272 >      * expressed as the current deficit -- target minus running
 273 >      * threads. To reduce flickering and drift around target values,
 274 >      * the relation is quadratic: adding a spare if (dc*dc)>=(sc*pc)
 275 >      * (where dc is deficit, sc is number of spare threads and pc is
 276 >      * target parallelism.) This effectively reduces churn at the
 277 >      * price of systematically undershooting target parallelism when
 278 >      * many threads are blocked. However, biasing toward undershooting
 279 >      * partially compensates for the above mechanics to suspend extra
 280 >      * threads, that normally lead to overshoot because we can only
 281 >      * suspend workers in-between top-level actions. It also better
 282 >      * copes with the fact that some of the methods in this class tend
 283 >      * to never become compiled (but are interpreted), so some
 284 >      * components of the entire set of controls might execute many
 285 >      * times faster than others. And similarly for cases where the
 286 >      * apparent lack of work is just due to GC stalls and other
 287 >      * transient system activity.
 288 >      *
 289 >      * 7. Maintaining other configuration parameters and monitoring
 290 >      * statistics. Updates to fields controlling parallelism level,
 291 >      * max size, etc can only meaningfully take effect for individual
 292 >      * threads upon their next top-level actions; i.e., between
 293 >      * stealing/running tasks/submission, which are separated by calls
 294 >      * to preStep. Memory ordering for these (assumed infrequent)
 295 >      * reconfiguration calls is ensured by using reads and writes to
 296 >      * volatile field workerCounts (that must be read in preStep anyway)
 297 >      * as "fences" -- user-level reads are preceded by reads of
 298 >      * workerCounts, and writes are followed by no-op CAS to
 299 >      * workerCounts. The values reported by other management and
 300 >      * monitoring methods are either computed on demand, or are kept
 301 >      * in fields that are only updated when threads are otherwise
 302 >      * idle.
 303 >      *
 304 >      * Beware that there is a lot of representation-level coupling
 305 >      * among classes ForkJoinPool, ForkJoinWorkerThread, and
 306 >      * ForkJoinTask. For example, direct access to "workers" array by
 307 >      * workers, and direct access to ForkJoinTask.status by both
 308 >      * ForkJoinPool and ForkJoinWorkerThread. There is little point
 309 >      * trying to reduce this, since any associated future changes in
 310 >      * representations will need to be accompanied by algorithmic
 311 >      * changes anyway.
 312 >      *
 313 >      * Style notes: There are lots of inline assignments (of form
 314 >      * "while ((local = field) != 0)") which are usually the simplest
 315 >      * way to ensure read orderings. Also several occurrences of the
 316 >      * unusual "do {} while(!cas...)" which is the simplest way to
 317 >      * force an update of a CAS'ed variable. There are also a few
 318 >      * other coding oddities that help some methods perform reasonably
 319 >      * even when interpreted (not compiled).
 320 >      *
 321 >      * The order of declarations in this file is: (1) statics (2)
 322 >      * fields (along with constants used when unpacking some of them)
 323 >      * (3) internal control methods (4) callbacks and other support
 324 >      * for ForkJoinTask and ForkJoinWorkerThread classes, (5) exported
 325 >      * methods (plus a few little helpers).
 326        */
 327
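The style notes near the end of the comment above call out two recurring idioms. Here is a minimal self-contained sketch of both, using AtomicInteger as a stand-in for the Unsafe-based CAS on int fields used throughout this file; the class and member names below are illustrative only, not part of ForkJoinPool:

    import java.util.concurrent.atomic.AtomicInteger;

    class CasIdioms {
        private final AtomicInteger counts = new AtomicInteger();

        // "do {} while (!cas...)": retries until the CAS from a freshly
        // read value succeeds, forcing the update to take effect.
        void addDelta(int delta) {
            int c;
            do {} while (!counts.compareAndSet(c = counts.get(), c + delta));
        }

        // Inline assignment in the condition re-reads the value on every
        // iteration before testing it, ensuring read ordering.
        int spinUntilNonZero() {
            int c;
            while ((c = counts.get()) == 0)
                Thread.yield();
            return c;
        }
    }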
 101 -      /** Mask for packing and unpacking shorts */
 102 -      private static final int shortMask = 0xffff;
 103 -
 104 -      /** Max pool size -- must be a power of two minus 1 */
 105 -      private static final int MAX_THREADS = 0x7FFF;
 106 -
 328        /**
 329         * Factory for creating new {@link ForkJoinWorkerThread}s.
 330         * A {@code ForkJoinWorkerThreadFactory} must be defined and used
 348        static class DefaultForkJoinWorkerThreadFactory
 349            implements ForkJoinWorkerThreadFactory {
 350            public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
 351 <              try {
 131 <                  return new ForkJoinWorkerThread(pool);
 132 <              } catch (OutOfMemoryError oom) {
 133 <                  return null;
 134 <              }
 351 >              return new ForkJoinWorkerThread(pool);
 352            }
 353        }
 354
 384            new AtomicInteger();
 385
 386        /**
 387 <       * Array holding all worker threads in the pool. Initialized upon
 388 <       * first use. Array size must be a power of two. Updates and
 389 <       * replacements are protected by workerLock, but it is always kept
 390 <       * in a consistent enough state to be randomly accessed without
 391 <       * locking by workers performing work-stealing.
 387 >       * Absolute bound for parallelism level. Twice this number must
 388 >       * fit into a 16-bit field to enable word-packing for some counts.
 389 >       */
 390 >      private static final int MAX_THREADS = 0x7fff;
 391 >
 392 >      /**
 393 >       * Array holding all worker threads in the pool. Array size must
 394 >       * be a power of two. Updates and replacements are protected by
 395 >       * workerLock, but the array is always kept in a consistent enough
 396 >       * state to be randomly accessed without locking by workers
 397 >       * performing work-stealing, as well as other traversal-based
 398 >       * methods in this class. All readers must tolerate that some
 399 >       * array slots may be null.
 400         */
 401        volatile ForkJoinWorkerThread[] workers;
 402
 403        /**
 404 <       * Lock protecting access to workers.
 404 >       * Queue for external submissions.
 405         */
 406 <      private final ReentrantLock workerLock;
 406 >      private final LinkedTransferQueue<ForkJoinTask<?>> submissionQueue;
 407
 408        /**
 409 <       * Condition for awaitTermination.
 409 >       * Lock protecting updates to workers array.
 410         */
 411 <      private final Condition termination;
 411 >      private final ReentrantLock workerLock;
 412
 413        /**
 414 <       * The uncaught exception handler used when any worker
 190 <       * abruptly terminates
 414 >       * Latch released upon termination.
 415         */
 416 <      private Thread.UncaughtExceptionHandler ueh;
 416 >      private final CountDownLatch terminationLatch;
 417
 418        /**
 419         * Creation factory for worker threads.
 421        private final ForkJoinWorkerThreadFactory factory;
 422
 423        /**
 424 <       * Head of stack of threads that were created to maintain
 425 <       * parallelism when other threads blocked, but have since
 202 <       * suspended when the parallelism level rose.
 424 >       * Sum of per-thread steal counts, updated only when threads are
 425 >       * idle or terminating.
 426         */
 427 <      private volatile WaitQueueNode spareStack;
 427 >      private volatile long stealCount;
 428
 429        /**
 430 <       * Sum of per-thread steal counts, updated only when threads are
 431 <       * idle or terminating.
 430 >       * Encoded record of top of Treiber stack of threads waiting for
 431 >       * events. The top 32 bits contain the count being waited for. The
 432 >       * bottom word contains one plus the pool index of the waiting worker
 433 >       * thread.
 434         */
 435 <      private final AtomicLong stealCount;
 435 >      private volatile long eventWaiters;
 436 >
 437 >      private static final int EVENT_COUNT_SHIFT = 32;
 438 >      private static final long WAITER_INDEX_MASK = (1L << EVENT_COUNT_SHIFT)-1L;
 439
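As a sketch of the encoding just described (only the two constants mirror the real ones; the helper class itself is illustrative and not part of the pool), the top word of eventWaiters carries the awaited event count and the bottom word carries poolIndex + 1, so zero means "no waiter":

    class WaiterEncoding {
        static final int EVENT_COUNT_SHIFT = 32;
        static final long WAITER_INDEX_MASK = (1L << EVENT_COUNT_SHIFT) - 1L;

        static long encode(int awaitedCount, int poolIndex) {
            return ((long) awaitedCount << EVENT_COUNT_SHIFT) | (long) (poolIndex + 1);
        }

        static int awaitedCount(long top) { return (int) (top >>> EVENT_COUNT_SHIFT); }
        static int waiterIndex(long top)  { return (int) (top & WAITER_INDEX_MASK) - 1; }

        public static void main(String[] args) {
            long top = encode(7, 3);
            assert awaitedCount(top) == 7 && waiterIndex(top) == 3; // run with -ea
        }
    }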
 440        /**
 441 <       * Queue for external submissions.
 441 >       * A counter for events that may wake up worker threads:
 442 >       *   - Submission of a new task to the pool
 443 >       *   - A worker pushing a task on an empty queue
 444 >       *   - termination and reconfiguration
 445         */
 446 <      private final LinkedTransferQueue<ForkJoinTask<?>> submissionQueue;
 446 >      private volatile int eventCount;
 447 >
 448 >      /**
 449 >       * Lifecycle control. The low word contains the number of workers
 450 >       * that are (probably) executing tasks. This value is atomically
 451 >       * incremented before a worker gets a task to run, and decremented
 452 >       * when worker has no tasks and cannot find any. Bits 16-18
 453 >       * contain runLevel value. When all are zero, the pool is
 454 >       * running. Level transitions are monotonic (running -> shutdown
 455 >       * -> terminating -> terminated) so each transition adds a bit.
 456 >       * These are bundled together to ensure consistent read for
 457 >       * termination checks (i.e., that runLevel is at least SHUTDOWN
 458 >       * and active threads is zero).
 459 >       */
 460 >      private volatile int runState;
 461 >
 462 >      // Note: The order among run level values matters.
 463 >      private static final int RUNLEVEL_SHIFT = 16;
 464 >      private static final int SHUTDOWN = 1 << RUNLEVEL_SHIFT;
 465 >      private static final int TERMINATING = 1 << (RUNLEVEL_SHIFT + 1);
 466 >      private static final int TERMINATED = 1 << (RUNLEVEL_SHIFT + 2);
 467 >      private static final int ACTIVE_COUNT_MASK = (1 << RUNLEVEL_SHIFT) - 1;
 468 >      private static final int ONE_ACTIVE = 1; // active update delta
 469
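A sketch of how the packed runState decodes, under the layout just described (constants mirror the file; the helper is illustrative only). Because the active count occupies only the low 16 bits and each level adds a higher bit, "at least SHUTDOWN" reduces to a plain numeric comparison, and one volatile read checks the combined termination condition:

    class RunStateDecoding {
        static final int RUNLEVEL_SHIFT = 16;
        static final int SHUTDOWN    = 1 << RUNLEVEL_SHIFT;
        static final int TERMINATING = 1 << (RUNLEVEL_SHIFT + 1);
        static final int TERMINATED  = 1 << (RUNLEVEL_SHIFT + 2);
        static final int ACTIVE_COUNT_MASK = (1 << RUNLEVEL_SHIFT) - 1;

        static int activeCount(int runState) { return runState & ACTIVE_COUNT_MASK; }

        // Shut down (or beyond) AND no active workers, from a single read.
        static boolean canTerminateOnShutdown(int runState) {
            return runState >= SHUTDOWN && activeCount(runState) == 0;
        }
    }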
 470        /**
 471 <       * Head of Treiber stack for barrier sync. See below for explanation.
 471 >       * Holds number of total (i.e., created and not yet terminated)
 472 >       * and running (i.e., not blocked on joins or other managed sync)
 473 >       * threads, packed together to ensure consistent snapshot when
 474 >       * making decisions about creating and suspending spare
 475 >       * threads. Updated only by CAS. Note that adding a new worker
 476 >       * requires incrementing both counts, since workers start off in
 477 >       * running state. This field is also used for memory-fencing
 478 >       * configuration parameters.
 479 >       */
 480 >      private volatile int workerCounts;
 481 >
 482 >      private static final int TOTAL_COUNT_SHIFT = 16;
 483 >      private static final int RUNNING_COUNT_MASK = (1 << TOTAL_COUNT_SHIFT) - 1;
 484 >      private static final int ONE_RUNNING = 1;
 485 >      private static final int ONE_TOTAL = 1 << TOTAL_COUNT_SHIFT;
 486 >
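A sketch of the workerCounts packing: total count in the high short, running count in the low short, always updated as a unit by CAS so the pair reads as one consistent snapshot. Constants mirror the file; the class is illustrative only:

    class WorkerCountDecoding {
        static final int TOTAL_COUNT_SHIFT = 16;
        static final int RUNNING_COUNT_MASK = (1 << TOTAL_COUNT_SHIFT) - 1;
        static final int ONE_RUNNING = 1;
        static final int ONE_TOTAL = 1 << TOTAL_COUNT_SHIFT;

        static int totalCount(int wc)   { return wc >>> TOTAL_COUNT_SHIFT; }
        static int runningCount(int wc) { return wc & RUNNING_COUNT_MASK; }

        public static void main(String[] args) {
            int wc = 0;
            wc += ONE_RUNNING | ONE_TOTAL;   // a new worker starts off running
            wc -= ONE_RUNNING;               // it then blocks on a join
            assert totalCount(wc) == 1 && runningCount(wc) == 0; // run with -ea
        }
    }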
 487 >      /*
 488 >       * Fields parallelism, maxPoolSize, locallyFifo,
 489 >       * maintainsParallelism, and ueh are non-volatile, but external
 490 >       * reads/writes use workerCount fences to ensure visibility.
 491         */
 220 -      private volatile WaitQueueNode syncStack;
 492
 493        /**
 494 <       * The count for event barrier
 494 >       * The target parallelism level.
 495         */
 496 <      private volatile long eventCount;
 496 >      private int parallelism;
 497
 498        /**
 499 <       * Pool number, just for assigning useful names to worker threads
 499 >       * The maximum allowed pool size.
 500         */
 501 <      private final int poolNumber;
 501 >      private int maxPoolSize;
 502
 503        /**
 504 <       * The maximum allowed pool size
 504 >       * True if use local fifo, not default lifo, for local polling.
 505 >       * Replicated by ForkJoinWorkerThreads.
 506         */
 507 <      private volatile int maxPoolSize;
 507 >      private boolean locallyFifo;
 508
 509        /**
 510 <       * The desired parallelism level, updated only under workerLock.
 510 >       * Controls whether to add spares to maintain parallelism.
 511         */
 512 <      private volatile int parallelism;
 512 >      private boolean maintainsParallelism;
 513
 514        /**
 515 <       * True if use local fifo, not default lifo, for local polling
 515 >       * The uncaught exception handler used when any worker
 516 >       * abruptly terminates.
 517         */
 518 <      private volatile boolean locallyFifo;
 518 >      private Thread.UncaughtExceptionHandler ueh;
 519
 520        /**
 521 <       * Holds number of total (i.e., created and not yet terminated)
 249 <       * and running (i.e., not blocked on joins or other managed sync)
 250 <       * threads, packed into one int to ensure consistent snapshot when
 251 <       * making decisions about creating and suspending spare
 252 <       * threads. Updated only by CAS. Note: CASes in
 253 <       * updateRunningCount and preJoin assume that running active count
 254 <       * is in low word, so need to be modified if this changes.
 521 >       * Pool number, just for assigning useful names to worker threads.
 522         */
 523 <      private volatile int workerCounts;
 523 >      private final int poolNumber;
 524
 525 <      private static int totalCountOf(int s) { return s >>> 16; }
 259 <      private static int runningCountOf(int s) { return s & shortMask; }
 260 <      private static int workerCountsFor(int t, int r) { return (t << 16) + r; }
 525 >      // utilities for updating fields
 526
 527        /**
 528 <       * Adds delta (which may be negative) to running count. This must
 264 <       * be called before (with negative arg) and after (with positive)
 265 <       * any managed synchronization (i.e., mainly, joins).
 528 >       * Adds delta to running count. Used mainly by ForkJoinTask.
 529         *
 530         * @param delta the number to add
 531         */
 532        final void updateRunningCount(int delta) {
 533 <          int s;
 534 <          do {} while (!casWorkerCounts(s = workerCounts, s + delta));
 533 >          int wc;
 534 >          do {} while (!UNSAFE.compareAndSwapInt(this, workerCountsOffset,
 535 >                                                 wc = workerCounts,
 536 >                                                 wc + delta));
 537        }
 538
 539        /**
 540 <       * Adds delta (which may be negative) to both total and running
 541 <       * count. This must be called upon creation and termination of
 277 <       * worker threads.
 278 <       *
 279 <       * @param delta the number to add
 540 >       * Write fence for user modifications of pool parameters
 541 >       * (parallelism, etc). Note that it doesn't matter if CAS fails.
 542         */
 543 <      private void updateWorkerCount(int delta) {
 544 <          int d = delta + (delta << 16); // add to both lo and hi parts
 545 <          int s;
 546 <          do {} while (!casWorkerCounts(s = workerCounts, s + d));
 543 >      private void workerCountWriteFence() {
 544 >          int wc;
 545 >          UNSAFE.compareAndSwapInt(this, workerCountsOffset,
 546 >                                   wc = workerCounts, wc);
 547        }
 548
 549        /**
 550 <       * Lifecycle control. High word contains runState, low word
 551 <       * contains the number of workers that are (probably) executing
 552 <       * tasks. This value is atomically incremented before a worker
 553 <       * gets a task to run, and decremented when worker has no tasks
 554 <       * and cannot find any. These two fields are bundled together to
 555 <       * support correct termination triggering. Note: activeCount
 294 <       * CAS'es cheat by assuming active count is in low word, so need
 295 <       * to be modified if this changes
 296 <       */
 297 <      private volatile int runControl;
 298 <
 299 <      // RunState values. Order among values matters
 300 <      private static final int RUNNING = 0;
 301 <      private static final int SHUTDOWN = 1;
 302 <      private static final int TERMINATING = 2;
 303 <      private static final int TERMINATED = 3;
 304 <
 305 <      private static int runStateOf(int c) { return c >>> 16; }
 306 <      private static int activeCountOf(int c) { return c & shortMask; }
 307 <      private static int runControlFor(int r, int a) { return (r << 16) + a; }
 550 >       * Read fence for external reads of pool parameters
 551 >       * (parallelism, maxPoolSize, etc).
 552 >       */
 553 >      private void workerCountReadFence() {
 554 >          int ignore = workerCounts;
 555 >      }
 556
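A sketch of the fence idiom behind workerCountWriteFence and workerCountReadFence, using AtomicInteger in place of the Unsafe CAS (names are illustrative only): a no-op CAS on a volatile publishes prior plain writes, and a volatile read before a plain read makes them visible, under the usual happens-before rules:

    import java.util.concurrent.atomic.AtomicInteger;

    class FenceIdiom {
        private final AtomicInteger fence = new AtomicInteger();
        private int plainConfigField; // deliberately non-volatile

        void write(int v) {
            plainConfigField = v;
            int f = fence.get();
            fence.compareAndSet(f, f); // no-op CAS acts as a store fence
        }

        int read() {
            int ignore = fence.get(); // volatile read acts as a load fence
            return plainConfigField;
        }
    }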
 557        /**
 558         * Tries incrementing active count; fails on contention.
 559 <       * Called by workers before/during executing tasks.
 559 >       * Called by workers before executing tasks.
 560         *
 561         * @return true on success
 562         */
 563        final boolean tryIncrementActiveCount() {
 564 <          int c = runControl;
 565 <          return casRunControl(c, c+1);
 564 >          int c;
 565 >          return UNSAFE.compareAndSwapInt(this, runStateOffset,
 566 >                                          c = runState, c + ONE_ACTIVE);
 567        }
 568
 569        /**
 570         * Tries decrementing active count; fails on contention.
 571 <       * Possibly triggers termination on success.
 323 <       * Called by workers when they can't find tasks.
 324 <       *
 325 <       * @return true on success
 571 >       * Called when workers cannot find tasks to run.
 572         */
 573        final boolean tryDecrementActiveCount() {
 574 <          int c = runControl;
 575 <          int nextc = c - 1;
 576 <          if (!casRunControl(c, nextc))
 574 >          int c;
 575 >          return UNSAFE.compareAndSwapInt(this, runStateOffset,
 576 >                                          c = runState, c - ONE_ACTIVE);
 577 >      }
 578 >
 579 >      /**
 580 >       * Advances to at least the given level. Returns true if not
 581 >       * already in at least the given level.
 582 >       */
 583 >      private boolean advanceRunLevel(int level) {
 584 >          for (;;) {
 585 >              int s = runState;
 586 >              if ((s & level) != 0)
 587 >                  return false;
 588 >              if (UNSAFE.compareAndSwapInt(this, runStateOffset, s, s | level))
 589 >                  return true;
 590 >          }
 591 >      }
 592 >
 593 >      // workers array maintenance
 594 >
 595 >      /**
 596 >       * Records and returns a workers array index for new worker.
 597 >       */
 598 >      private int recordWorker(ForkJoinWorkerThread w) {
 599 >          // Try using slot totalCount-1. If not available, scan and/or resize
 600 >          int k = (workerCounts >>> TOTAL_COUNT_SHIFT) - 1;
 601 >          final ReentrantLock lock = this.workerLock;
 602 >          lock.lock();
 603 >          try {
 604 >              ForkJoinWorkerThread[] ws = workers;
 605 >              int len = ws.length;
 606 >              if (k < 0 || k >= len || ws[k] != null) {
 607 >                  for (k = 0; k < len && ws[k] != null; ++k)
 608 >                      ;
 609 >                  if (k == len)
 610 >                      ws = Arrays.copyOf(ws, len << 1);
 611 >              }
 612 >              ws[k] = w;
 613 >              workers = ws; // volatile array write ensures slot visibility
 614 >          } finally {
 615 >              lock.unlock();
 616 >          }
 617 >          return k;
 618 >      }
 619 >
 620 >      /**
 621 >       * Nulls out record of worker in workers array.
 622 >       */
 623 >      private void forgetWorker(ForkJoinWorkerThread w) {
 624 >          int idx = w.poolIndex;
 625 >          // Locking helps method recordWorker avoid unnecessary expansion
 626 >          final ReentrantLock lock = this.workerLock;
 627 >          lock.lock();
 628 >          try {
 629 >              ForkJoinWorkerThread[] ws = workers;
 630 >              if (idx >= 0 && idx < ws.length && ws[idx] == w) // verify
 631 >                  ws[idx] = null;
 632 >          } finally {
 633 >              lock.unlock();
 634 >          }
 635 >      }
 636 >
 637 >      // adding and removing workers
 638 >
 639 >      /**
 640 >       * Tries to create and add new worker. Assumes that worker counts
 641 >       * are already updated to accommodate the worker, so adjusts on
 642 >       * failure.
 643 >       *
 644 >       * @return new worker or null if creation failed
 645 >       */
 646 >      private ForkJoinWorkerThread addWorker() {
 647 >          ForkJoinWorkerThread w = null;
 648 >          try {
 649 >              w = factory.newThread(this);
 650 >          } finally { // Adjust on either null or exceptional factory return
 651 >              if (w == null) {
 652 >                  onWorkerCreationFailure();
 653 >                  return null;
 654 >              }
 655 >          }
 656 >          w.start(recordWorker(w), locallyFifo, ueh);
 657 >          return w;
 658 >      }
 659 >
 660 >      /**
 661 >       * Adjusts counts upon failure to create worker.
 662 >       */
 663 >      private void onWorkerCreationFailure() {
 664 >          int c;
 665 >          do {} while (!UNSAFE.compareAndSwapInt(this, workerCountsOffset,
 666 >                                                 c = workerCounts,
 667 >                                                 c - (ONE_RUNNING|ONE_TOTAL)));
 668 >          tryTerminate(false); // in case of failure during shutdown
 669 >      }
 670 >
 671 >      /**
 672 >       * Create enough total workers to establish target parallelism,
 673 >       * giving up if terminating or addWorker fails.
 674 >       */
 675 >      private void ensureEnoughTotalWorkers() {
 676 >          int wc;
 677 >          while (runState < TERMINATING &&
 678 >                 ((wc = workerCounts) >>> TOTAL_COUNT_SHIFT) < parallelism) {
 679 >              if ((UNSAFE.compareAndSwapInt(this, workerCountsOffset,
 680 >                                            wc, wc + (ONE_RUNNING|ONE_TOTAL)) &&
 681 >                   addWorker() == null))
 682 >                  break;
 683 >          }
 684 >      }
 685 >
 686 >      /**
 687 >       * Final callback from terminating worker. Removes record of
 688 >       * worker from array, and adjusts counts. If pool is shutting
 689 >       * down, tries to complete termination, else possibly replaces
 690 >       * the worker.
 691 >       *
 692 >       * @param w the worker
 693 >       */
 694 >      final void workerTerminated(ForkJoinWorkerThread w) {
 695 >          if (w.active) { // force inactive
 696 >              w.active = false;
 697 >              do {} while (!tryDecrementActiveCount());
 698 >          }
 699 >          forgetWorker(w);
 700 >
 701 >          // decrement total count, and if was running, running count
 702 >          int unit = w.isTrimmed()? ONE_TOTAL : (ONE_RUNNING|ONE_TOTAL);
 703 >          int wc;
 704 >          do {} while (!UNSAFE.compareAndSwapInt(this, workerCountsOffset,
 705 >                                                 wc = workerCounts, wc - unit));
 706 >
 707 >          accumulateStealCount(w); // collect final count
 708 >          if (!tryTerminate(false))
 709 >              ensureEnoughTotalWorkers();
 710 >      }
 711 >
 712 >      // Waiting for and signalling events
 713 >
 714 >      /**
 715 >       * Ensures eventCount on exit is different (mod 2^32) than on
 716 >       * entry. CAS failures are OK -- any change in count suffices.
 717 >       */
 718 >      private void advanceEventCount() {
 719 >          int c;
 720 >          UNSAFE.compareAndSwapInt(this, eventCountOffset, c = eventCount, c+1);
 721 >      }
 722 >
 723 >      /**
 724 >       * Releases workers blocked on a count not equal to current count.
 725 >       */
 726 >      final void releaseWaiters() {
 727 >          long top;
 728 >          int id;
 729 >          while ((id = (int)((top = eventWaiters) & WAITER_INDEX_MASK)) > 0 &&
 730 >                 (int)(top >>> EVENT_COUNT_SHIFT) != eventCount) {
 731 >              ForkJoinWorkerThread[] ws = workers;
 732 >              ForkJoinWorkerThread w;
 733 >              if (ws.length >= id && (w = ws[id - 1]) != null &&
 734 >                  UNSAFE.compareAndSwapLong(this, eventWaitersOffset,
 735 >                                            top, w.nextWaiter))
 736 >                  LockSupport.unpark(w);
 737 >          }
 738 >      }
 739 >
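The pop performed by releaseWaiters follows the tagged-Treiber-stack pattern described in the overview: the CAS covers both the head index and the event-count tag, so a stale head (same index, different count) cannot be popped by mistake. A standalone sketch of that pattern, not the pool's actual code (names and the next-pointer array are illustrative):

    import java.util.concurrent.atomic.AtomicLong;

    class TaggedStack {
        private final AtomicLong head = new AtomicLong(); // (tag << 32) | index+1
        private final long[] next; // next[i] = encoded successor of node i

        TaggedStack(int capacity) { next = new long[capacity]; }

        void push(int index, int tag) {
            long h, nh = ((long) tag << 32) | (index + 1);
            do {
                h = head.get();
                next[index] = h;      // record successor before publishing
            } while (!head.compareAndSet(h, nh));
        }

        int pop() { // returns a node index, or -1 if empty
            for (;;) {
                long h = head.get();
                int id = (int) (h & 0xffffffffL);
                if (id == 0)
                    return -1;
                if (head.compareAndSet(h, next[id - 1]))
                    return id - 1;
            }
        }
    }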
 740 >      /**
 741 >       * Advances eventCount and releases waiters until interference by
 742 >       * other releasing threads is detected.
 743 >       */
 744 >      final void signalWork() {
 745 >          int ec;
 746 >          UNSAFE.compareAndSwapInt(this, eventCountOffset, ec=eventCount, ec+1);
 747 >          outer:for (;;) {
 748 >              long top = eventWaiters;
 749 >              ec = eventCount;
 750 >              for (;;) {
 751 >                  ForkJoinWorkerThread[] ws; ForkJoinWorkerThread w;
 752 >                  int id = (int)(top & WAITER_INDEX_MASK);
 753 >                  if (id <= 0 || (int)(top >>> EVENT_COUNT_SHIFT) == ec)
 754 >                      return;
 755 >                  if ((ws = workers).length < id || (w = ws[id - 1]) == null ||
 756 >                      !UNSAFE.compareAndSwapLong(this, eventWaitersOffset,
 757 >                                                 top, top = w.nextWaiter))
 758 >                      continue outer; // possibly stale; reread
 759 >                  LockSupport.unpark(w);
 760 >                  if (top != eventWaiters) // let someone else take over
 761 >                      return;
 762 >              }
 763 >          }
 764 >      }
 765 >
 766 >      /**
 767 >       * If worker is inactive, blocks until terminating or event count
 768 >       * advances from last value held by worker; in any case helps
 769 >       * release others.
 770 >       *
 771 >       * @param w the calling worker thread
 772 >       */
 773 >      private void eventSync(ForkJoinWorkerThread w) {
 774 >          if (!w.active) {
 775 >              int prev = w.lastEventCount;
 776 >              long nextTop = (((long)prev << EVENT_COUNT_SHIFT) |
 777 >                              ((long)(w.poolIndex + 1)));
 778 >              long top;
 779 >              while ((runState < SHUTDOWN || !tryTerminate(false)) &&
 780 >                     (((int)(top = eventWaiters) & WAITER_INDEX_MASK) == 0 ||
 781 >                      (int)(top >>> EVENT_COUNT_SHIFT) == prev) &&
 782 >                     eventCount == prev) {
 783 >                  if (UNSAFE.compareAndSwapLong(this, eventWaitersOffset,
 784 >                                                w.nextWaiter = top, nextTop)) {
 785 >                      accumulateStealCount(w); // transfer steals while idle
 786 >                      Thread.interrupted(); // clear/ignore interrupt
 787 >                      while (eventCount == prev)
 788 >                          w.doPark();
 789 >                      break;
 790 >                  }
 791 >              }
 792 >              w.lastEventCount = eventCount;
 793 >          }
 794 >          releaseWaiters();
 795 >      }
 796 >
 797 >      /**
 798 >       * Callback from workers invoked upon each top-level action (i.e.,
 799 >       * stealing a task or taking a submission and running
 800 >       * it). Performs one or both of the following:
 801 >       *
 802 >       * * If the worker cannot find work, updates its active status to
 803 >       * inactive and updates activeCount unless there is contention, in
 804 >       * which case it may try again (either in this or a subsequent
 805 >       * call). Additionally, awaits the next task event and/or helps
 806 >       * wake up other releasable waiters.
 807 >       *
 808 >       * * If there are too many running threads, suspends this worker
 809 >       * (first forcing inactivation if necessary). If it is not
 810 >       * resumed before a keepAlive elapses, the worker may be "trimmed"
 811 >       * -- killed while suspended within suspendAsSpare. Otherwise,
 812 >       * upon resume it rechecks to make sure that it is still needed.
 813 >       *
 814 >       * @param w the worker
 815 >       * @param worked false if the worker scanned for work but didn't
 816 >       * find any (in which case it may block waiting for work).
 817 >       */
 818 >      final void preStep(ForkJoinWorkerThread w, boolean worked) {
 819 >          boolean active = w.active;
 820 >          boolean inactivate = !worked & active;
 821 >          for (;;) {
 822 >              if (inactivate) {
 823 >                  int c = runState;
 824 >                  if (UNSAFE.compareAndSwapInt(this, runStateOffset,
 825 >                                               c, c - ONE_ACTIVE))
 826 >                      inactivate = active = w.active = false;
 827 >              }
 828 >              int wc = workerCounts;
 829 >              if ((wc & RUNNING_COUNT_MASK) <= parallelism) {
 830 >                  if (!worked)
 831 >                      eventSync(w);
 832 >                  return;
 833 >              }
 834 >              if (!(inactivate |= active) && // must inactivate to suspend
 835 >                  UNSAFE.compareAndSwapInt(this, workerCountsOffset,
 836 >                                           wc, wc - ONE_RUNNING) &&
 837 >                  !w.suspendAsSpare()) // false if trimmed
 838 >                  return;
 839 >          }
 840 >      }
 841 >
 842 >      /**
 843 >       * Adjusts counts and creates or resumes compensating threads for
 844 >       * a worker about to block on task joinMe, returning early if
 845 >       * joinMe becomes ready. First tries resuming an existing spare
 846 >       * (which usually also avoids any count adjustment), but must then
 847 >       * decrement running count to determine whether a new thread is
 848 >       * needed. See above for fuller explanation.
 849 >       */
 850 >      final void preJoin(ForkJoinTask<?> joinMe) {
 851 >          boolean dec = false; // true when running count decremented
 852 >          for (;;) {
 853 >              releaseWaiters(); // help other threads progress
 854 >
 855 >              if (joinMe.status < 0) // surround spare search with done checks
 856 >                  return;
 857 >              ForkJoinWorkerThread spare = null;
 858 >              for (ForkJoinWorkerThread w : workers) {
 859 >                  if (w != null && w.isSuspended()) {
 860 >                      spare = w;
 861 >                      break;
 862 >                  }
 863 >              }
 864 >              if (joinMe.status < 0)
 865 >                  return;
 866 >
 867 >              if (spare != null && spare.tryUnsuspend()) {
 868 >                  if (dec || joinMe.requestSignal() < 0) {
 869 >                      int c;
 870 >                      do {} while (!UNSAFE.compareAndSwapInt(this,
 871 >                                                             workerCountsOffset,
 872 >                                                             c = workerCounts,
 873 >                                                             c + ONE_RUNNING));
 874 >                  } // else no net count change
 875 >                  LockSupport.unpark(spare);
 876 >                  return;
 877 >              }
 878 >
 879 >              int wc = workerCounts; // decrement running count
 880 >              if (!dec && (wc & RUNNING_COUNT_MASK) != 0 &&
 881 >                  (dec = UNSAFE.compareAndSwapInt(this, workerCountsOffset,
 882 >                                                  wc, wc -= ONE_RUNNING)) &&
 883 >                  joinMe.requestSignal() < 0) { // cannot block
 884 >                  int c; // back out
 885 >                  do {} while (!UNSAFE.compareAndSwapInt(this,
 886 >                                                         workerCountsOffset,
 887 >                                                         c = workerCounts,
 888 >                                                         c + ONE_RUNNING));
 889 >                  return;
 890 >              }
 891 >
 892 >              if (dec) {
 893 >                  int tc = wc >>> TOTAL_COUNT_SHIFT;
 894 >                  int pc = parallelism;
 895 >                  int dc = pc - (wc & RUNNING_COUNT_MASK); // deficit count
 896 >                  if ((dc < pc && (dc <= 0 || (dc * dc < (tc - pc) * pc) ||
 897 >                                   !maintainsParallelism)) ||
 898 >                      tc >= maxPoolSize) // cannot add
 899 >                      return;
 900 >                  if (spare == null &&
 901 >                      UNSAFE.compareAndSwapInt(this, workerCountsOffset, wc,
 902 >                                               wc + (ONE_RUNNING|ONE_TOTAL))) {
 903 >                      addWorker();
 904 >                      return;
 905 >                  }
 906 >              }
 907 >          }
 908 >      }
 909 >
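A worked reading of the quadratic spare-creation rule used above (and in doBlock): with target parallelism pc, total count tc and running count rc, the deficit is dc = pc - rc and the spare count is sc = tc - pc; a compensating thread is added only when dc*dc >= sc*pc. The sketch below isolates that predicate, ignoring the maintainsParallelism and maxPoolSize checks (illustrative only):

    class SpareRule {
        static boolean shouldAddSpare(int pc, int tc, int rc) {
            int dc = pc - rc;          // deficit: target minus running
            int sc = tc - pc;          // spares already created
            return dc >= pc || (dc > 0 && dc * dc >= sc * pc);
        }

        public static void main(String[] args) {
            // pc = 8, no spares yet (tc = 8): any deficit justifies one.
            assert shouldAddSpare(8, 8, 7);    // dc=1, sc=0: 1 >= 0
            // pc = 8, four spares (tc = 12): need dc*dc >= 32, i.e. dc >= 6.
            assert !shouldAddSpare(8, 12, 3);  // dc=5: 25 < 32
            assert shouldAddSpare(8, 12, 2);   // dc=6: 36 >= 32
        }
    }

So the more spares that already exist, the larger the deficit must grow before another thread is created, which is exactly the churn-reducing bias the overview describes.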
 910 >      /**
 911 >       * Same idea as preJoin but with too many differing details to
 912 >       * integrate: There are no task-based signal counts, and only one
 913 >       * way to do the actual blocking. So for simplicity it is directly
 914 >       * incorporated into this method.
 915 >       */
 916 >      final void doBlock(ManagedBlocker blocker, boolean maintainPar)
 917 >          throws InterruptedException {
 918 >          maintainPar &= maintainsParallelism; // override
 919 >          boolean dec = false;
 920 >          boolean done = false;
 921 >          for (;;) {
 922 >              releaseWaiters();
 923 >              if (done = blocker.isReleasable())
 924 >                  break;
 925 >              ForkJoinWorkerThread spare = null;
 926 >              for (ForkJoinWorkerThread w : workers) {
 927 >                  if (w != null && w.isSuspended()) {
 928 >                      spare = w;
 929 >                      break;
 930 >                  }
 931 >              }
 932 >              if (done = blocker.isReleasable())
 933 >                  break;
 934 >              if (spare != null && spare.tryUnsuspend()) {
 935 >                  if (dec) {
 936 >                      int c;
 937 >                      do {} while (!UNSAFE.compareAndSwapInt(this,
 938 >                                                             workerCountsOffset,
 939 >                                                             c = workerCounts,
 940 >                                                             c + ONE_RUNNING));
 941 >                  }
 942 >                  LockSupport.unpark(spare);
 943 >                  break;
 944 >              }
 945 >              int wc = workerCounts;
 946 >              if (!dec && (wc & RUNNING_COUNT_MASK) != 0)
 947 >                  dec = UNSAFE.compareAndSwapInt(this, workerCountsOffset,
 948 >                                                 wc, wc -= ONE_RUNNING);
 949 >              if (dec) {
 950 >                  int tc = wc >>> TOTAL_COUNT_SHIFT;
 951 >                  int pc = parallelism;
 952 >                  int dc = pc - (wc & RUNNING_COUNT_MASK);
 953 >                  if ((dc < pc && (dc <= 0 || (dc * dc < (tc - pc) * pc) ||
 954 >                                   !maintainPar)) ||
 955 >                      tc >= maxPoolSize)
 956 >                      break;
 957 >                  if (spare == null &&
 958 >                      UNSAFE.compareAndSwapInt(this, workerCountsOffset, wc,
 959 >                                               wc + (ONE_RUNNING|ONE_TOTAL))){
 960 >                      addWorker();
 961 >                      break;
 962 >                  }
 963 >              }
 964 >          }
 965 >
 966 >          try {
 967 >              if (!done)
 968 >                  do {} while (!blocker.isReleasable() && !blocker.block());
 969 >          } finally {
 970 >              if (dec) {
 971 >                  int c;
 972 >                  do {} while (!UNSAFE.compareAndSwapInt(this,
 973 >                                                         workerCountsOffset,
 974 >                                                         c = workerCounts,
 975 >                                                         c + ONE_RUNNING));
 976 >              }
 977 >          }
 978 >      }
 979 >
 980 >      /**
 981 >       * Possibly initiates and/or completes termination.
 982 >       *
 983 >       * @param now if true, unconditionally terminate, else only
 984 >       * if shutdown and empty queue and no active workers
 985 >       * @return true if now terminating or terminated
 986 >       */
 987 >      private boolean tryTerminate(boolean now) {
 988 >          if (now)
 989 >              advanceRunLevel(SHUTDOWN); // ensure at least SHUTDOWN
 990 >          else if (runState < SHUTDOWN ||
 991 >                   !submissionQueue.isEmpty() ||
 992 >                   (runState & ACTIVE_COUNT_MASK) != 0)
 993                return false;
 994 <          if (canTerminateOnShutdown(nextc))
 995 <              terminateOnShutdown();
 994 >
 995 >          if (advanceRunLevel(TERMINATING))
 996 >              startTerminating();
 997 >
 998 >          // Finish now if all threads terminated; else in some subsequent call
 999 >          if ((workerCounts >>> TOTAL_COUNT_SHIFT) == 0) {
1000 >              advanceRunLevel(TERMINATED);
1001 >              terminationLatch.countDown();
1002 >          }
1003            return true;
1004        }
1005
1006        /**
1007 <       * Returns {@code true} if argument represents zero active count
 339 <       * and nonzero runstate, which is the triggering condition for
 340 <       * terminating on shutdown.
1007 >       * Actions on transition to TERMINATING.
1008         */
1009 <      private static boolean canTerminateOnShutdown(int c) {
1010 <          // i.e. least bit is nonzero runState bit
1011 <          return ((c & -c) >>> 16) != 0;
1009 >      private void startTerminating() {
1010 >          // Clear out and cancel submissions, ignoring exceptions
1011 >          ForkJoinTask<?> task;
1012 >          while ((task = submissionQueue.poll()) != null) {
1013 >              try {
1014 >                  task.cancel(false);
1015 >              } catch (Throwable ignore) {
1016 >              }
1017 >          }
1018 >          // Propagate run level
1019 >          for (ForkJoinWorkerThread w : workers) {
1020 >              if (w != null)
1021 >                  w.shutdown(); // also resumes suspended workers
1022 >          }
1023 >          // Ensure no straggling local tasks
1024 >          for (ForkJoinWorkerThread w : workers) {
1025 >              if (w != null)
1026 >                  w.cancelTasks();
1027 >          }
1028 >          // Wake up idle workers
1029 >          advanceEventCount();
1030 >          releaseWaiters();
1031 >          // Unstick pending joins
1032 >          for (ForkJoinWorkerThread w : workers) {
1033 >              if (w != null && !w.isTerminated()) {
1034 >                  try {
1035 >                      w.interrupt();
1036 >                  } catch (SecurityException ignore) {
1037 >                  }
1038 >              }
1039 >          }
1040        }
1041
1042 +      // misc support for ForkJoinWorkerThread
1043 +
1044        /**
1045 <       * Transition run state to at least the given state. Return true
 349 <       * if not already at least given state.
1045 >       * Returns pool number.
1046         */
1047 <      private boolean transitionRunStateTo(int state) {
1048 <          for (;;) {
1049 <              int c = runControl;
1050 <              if (runStateOf(c) >= state)
1051 <                  return false;
1052 <              if (casRunControl(c, runControlFor(state, activeCountOf(c))))
1053 <                  return true;
1047 >      final int getPoolNumber() {
1048 >          return poolNumber;
1049 >      }
1050 >
1051 >      /**
1052 >       * Accumulates steal count from a worker, clearing
1053 >       * the worker's value.
1054 >       */
1055 >      final void accumulateStealCount(ForkJoinWorkerThread w) {
1056 >          int sc = w.stealCount;
1057 >          if (sc != 0) {
1058 >              long c;
1059 >              w.stealCount = 0;
1060 >              do {} while (!UNSAFE.compareAndSwapLong(this, stealCountOffset,
1061 >                                                      c = stealCount, c + sc));
1062            }
1063        }
1064
1065        /**
1066 <       * Controls whether to add spares to maintain parallelism
1066 >       * Returns the approximate (non-atomic) number of idle threads per
1067 >       * active thread.
1068         */
1069 <      private volatile boolean maintainsParallelism;
1069 >      final int idlePerActive() {
1070 >          int ac = runState; // no mask -- artificially boosts during shutdown
1071 >          int pc = parallelism; // use targeted parallelism, not rc
1072 >          // Use exact results for small values, saturate past 4
1073 >          return pc <= ac? 0 : pc >>> 1 <= ac? 1 : pc >>> 2 <= ac? 3 : pc >>> 3;
1074 >      }
1075 >
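For a concrete reading of idlePerActive: with parallelism 32, the expression returns 0 when 32 or more workers are active, 1 when 16-31 are active, 3 when 8-15 are active, and saturates at 32 >>> 3 = 4 below that — exact for near-full activity, coarse once activity drops far below target.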
1076 |
> |
/** |
1077 |
> |
* Returns the approximate (non-atomic) difference between running |
1078 |
> |
* and active counts. |
1079 |
> |
*/ |
1080 |
> |
final int inactiveCount() { |
1081 |
> |
return (workerCounts & RUNNING_COUNT_MASK) - |
1082 |
> |
(runState & ACTIVE_COUNT_MASK); |
1083 |
> |
} |
1084 |
> |
|
1085 |
> |
// Public and protected methods |
1086 |
|
|
1087 |
|
// Constructors |
1088 |
|
|
1149 |
|
* java.lang.RuntimePermission}{@code ("modifyThread")} |
1150 |
|
*/ |
1151 |
|
public ForkJoinPool(int parallelism, ForkJoinWorkerThreadFactory factory) { |
1152 |
< |
if (parallelism <= 0 || parallelism > MAX_THREADS) |
432 |
< |
throw new IllegalArgumentException(); |
1152 |
> |
checkPermission(); |
1153 |
|
if (factory == null) |
1154 |
|
throw new NullPointerException(); |
1155 |
< |
checkPermission(); |
1156 |
< |
this.factory = factory; |
1155 |
> |
if (parallelism <= 0 || parallelism > MAX_THREADS) |
1156 |
> |
throw new IllegalArgumentException(); |
1157 |
> |
this.poolNumber = poolNumberGenerator.incrementAndGet(); |
1158 |
> |
int arraySize = initialArraySizeFor(parallelism); |
1159 |
|
this.parallelism = parallelism; |
1160 |
+ |
this.factory = factory; |
1161 |
|
this.maxPoolSize = MAX_THREADS; |
1162 |
|
this.maintainsParallelism = true; |
1163 |
< |
this.poolNumber = poolNumberGenerator.incrementAndGet(); |
441 |
< |
this.workerLock = new ReentrantLock(); |
442 |
< |
this.termination = workerLock.newCondition(); |
443 |
< |
this.stealCount = new AtomicLong(); |
1163 |
> |
this.workers = new ForkJoinWorkerThread[arraySize]; |
1164 |
|
this.submissionQueue = new LinkedTransferQueue<ForkJoinTask<?>>(); |
1165 |
< |
// worker array and workers are lazily constructed |
1166 |
< |
} |
1167 |
< |
|
1168 |
< |
/** |
1169 |
< |
* Creates a new worker thread using factory. |
450 |
< |
* |
451 |
< |
* @param index the index to assign worker |
452 |
< |
* @return new worker, or null if factory failed |
453 |
< |
*/ |
454 |
< |
private ForkJoinWorkerThread createWorker(int index) { |
455 |
< |
Thread.UncaughtExceptionHandler h = ueh; |
456 |
< |
ForkJoinWorkerThread w = factory.newThread(this); |
457 |
< |
if (w != null) { |
458 |
< |
w.poolIndex = index; |
459 |
< |
w.setDaemon(true); |
460 |
< |
w.setAsyncMode(locallyFifo); |
461 |
< |
w.setName("ForkJoinPool-" + poolNumber + "-worker-" + index); |
462 |
< |
if (h != null) |
463 |
< |
w.setUncaughtExceptionHandler(h); |
464 |
< |
} |
465 |
< |
return w; |
466 |
< |
} |
467 |
< |
|
468 |
< |
/** |
469 |
< |
* Returns a good size for worker array given pool size. |
470 |
< |
* Currently requires size to be a power of two. |
471 |
< |
*/ |
472 |
< |
private static int arraySizeFor(int poolSize) { |
473 |
< |
if (poolSize <= 1) |
474 |
< |
return 1; |
475 |
< |
// See Hackers Delight, sec 3.2 |
476 |
< |
int c = poolSize >= MAX_THREADS ? MAX_THREADS : (poolSize - 1); |
477 |
< |
c |= c >>> 1; |
478 |
< |
c |= c >>> 2; |
479 |
< |
c |= c >>> 4; |
480 |
< |
c |= c >>> 8; |
481 |
< |
c |= c >>> 16; |
482 |
< |
return c + 1; |
483 |
< |
} |
484 |
< |
|
485 |
< |
/** |
486 |
< |
* Creates or resizes array if necessary to hold newLength. |
487 |
< |
* Call only under exclusion. |
488 |
< |
* |
489 |
< |
* @return the array |
490 |
< |
*/ |
491 |
< |
private ForkJoinWorkerThread[] ensureWorkerArrayCapacity(int newLength) { |
492 |
< |
ForkJoinWorkerThread[] ws = workers; |
493 |
< |
if (ws == null) |
494 |
< |
return workers = new ForkJoinWorkerThread[arraySizeFor(newLength)]; |
495 |
< |
else if (newLength > ws.length) |
496 |
< |
return workers = Arrays.copyOf(ws, arraySizeFor(newLength)); |
497 |
< |
else |
498 |
< |
return ws; |
499 |
< |
} |
500 |
< |
|
501 |
< |
/** |
502 |
< |
* Tries to shrink workers into smaller array after one or more terminate. |
503 |
< |
*/ |
504 |
< |
private void tryShrinkWorkerArray() { |
505 |
< |
ForkJoinWorkerThread[] ws = workers; |
506 |
< |
if (ws != null) { |
507 |
< |
int len = ws.length; |
508 |
< |
int last = len - 1; |
509 |
< |
while (last >= 0 && ws[last] == null) |
510 |
< |
--last; |
511 |
< |
int newLength = arraySizeFor(last+1); |
512 |
< |
if (newLength < len) |
513 |
< |
workers = Arrays.copyOf(ws, newLength); |
514 |
< |
} |
515 |
< |
} |
516 |
< |
|
517 |
< |
/** |
518 |
< |
* Initializes workers if necessary. |
519 |
< |
*/ |
520 |
< |
final void ensureWorkerInitialization() { |
521 |
< |
ForkJoinWorkerThread[] ws = workers; |
522 |
< |
if (ws == null) { |
523 |
< |
final ReentrantLock lock = this.workerLock; |
524 |
< |
lock.lock(); |
525 |
< |
try { |
526 |
< |
ws = workers; |
527 |
< |
if (ws == null) { |
528 |
< |
int ps = parallelism; |
529 |
< |
updateWorkerCount(ps); |
530 |
< |
ws = ensureWorkerArrayCapacity(ps); |
531 |
< |
for (int i = 0; i < ps; ++i) { |
532 |
< |
ForkJoinWorkerThread w = createWorker(i); |
533 |
< |
if (w != null) { |
534 |
< |
ws[i] = w; |
535 |
< |
w.start(); |
536 |
< |
} |
537 |
< |
else |
538 |
< |
updateWorkerCount(-1); |
539 |
< |
} |
540 |
< |
} |
541 |
< |
} finally { |
542 |
< |
lock.unlock(); |
543 |
< |
} |
544 |
< |
} |
1165 |
> |
this.workerLock = new ReentrantLock(); |
1166 |
> |
this.terminationLatch = new CountDownLatch(1); |
1167 |
> |
// Start first worker; remaining workers added upon first submission |
1168 |
> |
workerCounts = ONE_RUNNING | ONE_TOTAL; |
1169 |
> |
addWorker(); |
1170 |
|
} |
1171 |
|
|
1172 |
|
/** |
1173 |
< |
* Worker creation and startup for threads added via setParallelism. |
1174 |
< |
*/ |
1175 |
< |
private void createAndStartAddedWorkers() { |
1176 |
< |
resumeAllSpares(); // Allow spares to convert to nonspare |
1177 |
< |
int ps = parallelism; |
1178 |
< |
ForkJoinWorkerThread[] ws = ensureWorkerArrayCapacity(ps); |
1179 |
< |
int len = ws.length; |
1180 |
< |
// Sweep through slots, to keep lowest indices most populated |
1181 |
< |
int k = 0; |
1182 |
< |
while (k < len) { |
1183 |
< |
if (ws[k] != null) { |
559 |
< |
++k; |
560 |
< |
continue; |
561 |
< |
} |
562 |
< |
int s = workerCounts; |
563 |
< |
int tc = totalCountOf(s); |
564 |
< |
int rc = runningCountOf(s); |
565 |
< |
if (rc >= ps || tc >= ps) |
566 |
< |
break; |
567 |
< |
if (casWorkerCounts (s, workerCountsFor(tc+1, rc+1))) { |
568 |
< |
ForkJoinWorkerThread w = createWorker(k); |
569 |
< |
if (w != null) { |
570 |
< |
ws[k++] = w; |
571 |
< |
w.start(); |
572 |
< |
} |
573 |
< |
else { |
574 |
< |
updateWorkerCount(-1); // back out on failed creation |
575 |
< |
break; |
576 |
< |
} |
577 |
< |
} |
578 |
< |
} |
1173 |
> |
* Returns initial power of two size for workers array. |
1174 |
> |
* @param pc the initial parallelism level |
1175 |
> |
*/ |
1176 |
> |
private static int initialArraySizeFor(int pc) { |
1177 |
> |
// See Hacker's Delight, sec 3.2. We know MAX_THREADS < (1 << 16) |
1178 |
> |
int size = pc < MAX_THREADS ? pc + 1 : MAX_THREADS; |
1179 |
> |
size |= size >>> 1; |
1180 |
> |
size |= size >>> 2; |
1181 |
> |
size |= size >>> 4; |
1182 |
> |
size |= size >>> 8; |
1183 |
> |
return size + 1; |
1184 |
|
} |
1185 |
|
|
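The shifted ORs above smear the highest set bit of size into every lower position, producing a value of the form 2^k - 1, so the final increment yields a power of two; stopping at >>> 8 is enough because the input is below 1 << 16. A stand-alone sketch with a worked value (hypothetical helper, not pool code):

    // Rounds n up to the next power of two, valid for 0 < n <= (1 << 16).
    static int nextPowerOfTwo(int n) {
        int v = n - 1;          // so exact powers of two map to themselves
        v |= v >>> 1;           // highest bit copied one position down
        v |= v >>> 2;           // top run of ones doubled
        v |= v >>> 4;
        v |= v >>> 8;           // every bit below the highest is now set
        return v + 1;           // 2^k - 1 becomes 2^k
    }
    // Example: n = 9 -> v = 8 (1000) -> 1100 -> 1111 -> returns 16.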
1186 |
|
// Execution methods |
1191 |
|
private <T> void doSubmit(ForkJoinTask<T> task) { |
1192 |
|
if (task == null) |
1193 |
|
throw new NullPointerException(); |
1194 |
< |
if (isShutdown()) |
1194 |
> |
if (runState >= SHUTDOWN) |
1195 |
|
throw new RejectedExecutionException(); |
591 |
– |
if (workers == null) |
592 |
– |
ensureWorkerInitialization(); |
1196 |
|
submissionQueue.offer(task); |
1197 |
< |
signalIdleWorkers(); |
1197 |
> |
advanceEventCount(); |
1198 |
> |
releaseWaiters(); |
1199 |
> |
if ((workerCounts >>> TOTAL_COUNT_SHIFT) < parallelism) |
1200 |
> |
ensureEnoughTotalWorkers(); |
1201 |
|
} |
1202 |
|
|
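doSubmit is the internal path behind the public submit/execute/invoke methods: reject after shutdown, enqueue, then wake or add workers. A minimal caller-side sketch using only the public API (hypothetical demo class):

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.RecursiveTask;

    public class SubmitDemo {
        public static void main(String[] args) {
            ForkJoinPool pool = new ForkJoinPool();    // default parallelism
            RecursiveTask<Integer> task = new RecursiveTask<Integer>() {
                protected Integer compute() { return 6 * 7; }
            };
            System.out.println(pool.submit(task).join());  // prints 42
            pool.shutdown();
        }
    }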
1203 |
|
/** |
1293 |
|
return task; |
1294 |
|
} |
1295 |
|
|
690 |
– |
|
1296 |
|
/** |
1297 |
|
* @throws NullPointerException {@inheritDoc} |
1298 |
|
* @throws RejectedExecutionException {@inheritDoc} |
1319 |
|
private static final long serialVersionUID = -7914297376763021607L; |
1320 |
|
} |
1321 |
|
|
717 |
– |
// Configuration and status settings and queries |
718 |
– |
|
1322 |
|
/** |
1323 |
|
* Returns the factory used for constructing new workers. |
1324 |
|
* |
1335 |
|
* @return the handler, or {@code null} if none |
1336 |
|
*/ |
1337 |
|
public Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() { |
1338 |
< |
Thread.UncaughtExceptionHandler h; |
1339 |
< |
final ReentrantLock lock = this.workerLock; |
737 |
< |
lock.lock(); |
738 |
< |
try { |
739 |
< |
h = ueh; |
740 |
< |
} finally { |
741 |
< |
lock.unlock(); |
742 |
< |
} |
743 |
< |
return h; |
1338 |
> |
workerCountReadFence(); |
1339 |
> |
return ueh; |
1340 |
|
} |
1341 |
|
|
1342 |
|
/** |
1355 |
|
public Thread.UncaughtExceptionHandler |
1356 |
|
setUncaughtExceptionHandler(Thread.UncaughtExceptionHandler h) { |
1357 |
|
checkPermission(); |
1358 |
< |
Thread.UncaughtExceptionHandler old = null; |
1359 |
< |
final ReentrantLock lock = this.workerLock; |
1360 |
< |
lock.lock(); |
765 |
< |
try { |
766 |
< |
old = ueh; |
1358 |
> |
workerCountReadFence(); |
1359 |
> |
Thread.UncaughtExceptionHandler old = ueh; |
1360 |
> |
if (h != old) { |
1361 |
|
ueh = h; |
1362 |
< |
ForkJoinWorkerThread[] ws = workers; |
1363 |
< |
if (ws != null) { |
1364 |
< |
for (int i = 0; i < ws.length; ++i) { |
1365 |
< |
ForkJoinWorkerThread w = ws[i]; |
772 |
< |
if (w != null) |
773 |
< |
w.setUncaughtExceptionHandler(h); |
774 |
< |
} |
1362 |
> |
workerCountWriteFence(); |
1363 |
> |
for (ForkJoinWorkerThread w : workers) { |
1364 |
> |
if (w != null) |
1365 |
> |
w.setUncaughtExceptionHandler(h); |
1366 |
|
} |
776 |
– |
} finally { |
777 |
– |
lock.unlock(); |
1367 |
|
} |
1368 |
|
return old; |
1369 |
|
} |
1370 |
|
|
782 |
– |
|
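For illustration, client code installing a pool-wide handler (pool is a hypothetical ForkJoinPool reference; as the loop above shows, the setter also propagates the handler to existing workers):

    pool.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        public void uncaughtException(Thread t, Throwable e) {
            System.err.println("worker " + t.getName() + " failed: " + e);
        }
    });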
1371 |
|
/** |
1372 |
|
* Sets the target parallelism level of this pool. |
1373 |
|
* |
1383 |
|
checkPermission(); |
1384 |
|
if (parallelism <= 0 || parallelism > maxPoolSize) |
1385 |
|
throw new IllegalArgumentException(); |
1386 |
< |
final ReentrantLock lock = this.workerLock; |
1387 |
< |
lock.lock(); |
1388 |
< |
try { |
1389 |
< |
if (isProcessingTasks()) { |
1390 |
< |
int p = this.parallelism; |
1391 |
< |
this.parallelism = parallelism; |
1392 |
< |
if (workers != null) { |
1393 |
< |
if (parallelism > p) |
1394 |
< |
createAndStartAddedWorkers(); |
1395 |
< |
else |
808 |
< |
trimSpares(); |
1386 |
> |
workerCountReadFence(); |
1387 |
> |
int pc = this.parallelism; |
1388 |
> |
if (pc != parallelism) { |
1389 |
> |
this.parallelism = parallelism; |
1390 |
> |
workerCountWriteFence(); |
1391 |
> |
// Release spares. If too many, some will die after re-suspend |
1392 |
> |
for (ForkJoinWorkerThread w : workers) { |
1393 |
> |
if (w != null && w.tryUnsuspend()) { |
1394 |
> |
updateRunningCount(1); |
1395 |
> |
LockSupport.unpark(w); |
1396 |
|
} |
1397 |
|
} |
1398 |
< |
} finally { |
1399 |
< |
lock.unlock(); |
1398 |
> |
ensureEnoughTotalWorkers(); |
1399 |
> |
advanceEventCount(); |
1400 |
> |
releaseWaiters(); // force config recheck by existing workers |
1401 |
|
} |
814 |
– |
signalIdleWorkers(); |
1402 |
|
} |
1403 |
|
|
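The spare-release loop above pairs tryUnsuspend with LockSupport.unpark. A stripped-down sketch of that handshake (hypothetical stand-alone class; the real worker also adjusts counts and resolves races with CAS):

    import java.util.concurrent.locks.LockSupport;

    class SpareWorker extends Thread {
        private volatile boolean suspended;

        void suspendSelf() {                  // called by the worker itself
            suspended = true;
            while (suspended)                 // loop: park can return spuriously
                LockSupport.park(this);
        }

        boolean tryUnsuspend() {              // called by a resumer
            if (suspended) { suspended = false; return true; }
            return false;
        }
    }
    // resuming side, as in setParallelism above:
    //     if (w.tryUnsuspend()) { updateRunningCount(1); LockSupport.unpark(w); }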
1404 |
|
/** |
1407 |
|
* @return the targeted parallelism level of this pool |
1408 |
|
*/ |
1409 |
|
public int getParallelism() { |
1410 |
+ |
// workerCountReadFence(); // inlined below |
1411 |
+ |
int ignore = workerCounts; |
1412 |
|
return parallelism; |
1413 |
|
} |
1414 |
|
|
1421 |
|
* @return the number of worker threads |
1422 |
|
*/ |
1423 |
|
public int getPoolSize() { |
1424 |
< |
return totalCountOf(workerCounts); |
1424 |
> |
return workerCounts >>> TOTAL_COUNT_SHIFT; |
1425 |
|
} |
1426 |
|
|
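getPoolSize above and getRunningThreadCount below each read one half of the two counts packed into the single CASable workerCounts field. The constants here are assumptions consistent with the 16-bit fields described in the overview, not values shown in this diff:

    static final int TOTAL_COUNT_SHIFT  = 16;               // assumed
    static final int RUNNING_COUNT_MASK = (1 << 16) - 1;    // assumed
    static final int ONE_TOTAL   = 1 << TOTAL_COUNT_SHIFT;  // adds one total
    static final int ONE_RUNNING = 1;                       // adds one running

    static int totalOf(int counts)   { return counts >>> TOTAL_COUNT_SHIFT; }
    static int runningOf(int counts) { return counts & RUNNING_COUNT_MASK; }
    // Example: counts = 0x00040003 encodes 4 total, 3 running, and the
    // constructor's ONE_RUNNING | ONE_TOTAL encodes one of each.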
1427 |
|
/** |
1433 |
|
* @return the maximum |
1434 |
|
*/ |
1435 |
|
public int getMaximumPoolSize() { |
1436 |
+ |
workerCountReadFence(); |
1437 |
|
return maxPoolSize; |
1438 |
|
} |
1439 |
|
|
1451 |
|
if (newMax < 0 || newMax > MAX_THREADS) |
1452 |
|
throw new IllegalArgumentException(); |
1453 |
|
maxPoolSize = newMax; |
1454 |
+ |
workerCountWriteFence(); |
1455 |
|
} |
1456 |
|
|
866 |
– |
|
1457 |
|
/** |
1458 |
|
* Returns {@code true} if this pool dynamically maintains its |
1459 |
|
* target parallelism level. If false, new threads are added only |
1462 |
|
* @return {@code true} if maintains parallelism |
1463 |
|
*/ |
1464 |
|
public boolean getMaintainsParallelism() { |
1465 |
+ |
workerCountReadFence(); |
1466 |
|
return maintainsParallelism; |
1467 |
|
} |
1468 |
|
|
1475 |
|
*/ |
1476 |
|
public void setMaintainsParallelism(boolean enable) { |
1477 |
|
maintainsParallelism = enable; |
1478 |
+ |
workerCountWriteFence(); |
1479 |
|
} |
1480 |
|
|
1481 |
|
/** |
1492 |
|
* @see #getAsyncMode |
1493 |
|
*/ |
1494 |
|
public boolean setAsyncMode(boolean async) { |
1495 |
+ |
workerCountReadFence(); |
1496 |
|
boolean oldMode = locallyFifo; |
1497 |
< |
locallyFifo = async; |
1498 |
< |
ForkJoinWorkerThread[] ws = workers; |
1499 |
< |
if (ws != null) { |
1500 |
< |
for (int i = 0; i < ws.length; ++i) { |
1501 |
< |
ForkJoinWorkerThread t = ws[i]; |
1502 |
< |
if (t != null) |
910 |
< |
t.setAsyncMode(async); |
1497 |
> |
if (oldMode != async) { |
1498 |
> |
locallyFifo = async; |
1499 |
> |
workerCountWriteFence(); |
1500 |
> |
for (ForkJoinWorkerThread w : workers) { |
1501 |
> |
if (w != null) |
1502 |
> |
w.setAsyncMode(async); |
1503 |
|
} |
1504 |
|
} |
1505 |
|
return oldMode; |
1513 |
|
* @see #setAsyncMode |
1514 |
|
*/ |
1515 |
|
public boolean getAsyncMode() { |
1516 |
+ |
workerCountReadFence(); |
1517 |
|
return locallyFifo; |
1518 |
|
} |
1519 |
|
|
1520 |
|
/** |
1521 |
|
* Returns an estimate of the number of worker threads that are |
1522 |
|
* not blocked waiting to join tasks or for other managed |
1523 |
< |
* synchronization. |
1523 |
> |
* synchronization. This method may overestimate the |
1524 |
> |
* number of running threads. |
1525 |
|
* |
1526 |
|
* @return the number of worker threads |
1527 |
|
*/ |
1528 |
|
public int getRunningThreadCount() { |
1529 |
< |
return runningCountOf(workerCounts); |
1529 |
> |
return workerCounts & RUNNING_COUNT_MASK; |
1530 |
|
} |
1531 |
|
|
1532 |
|
/** |
1537 |
|
* @return the number of active threads |
1538 |
|
*/ |
1539 |
|
public int getActiveThreadCount() { |
1540 |
< |
return activeCountOf(runControl); |
947 |
< |
} |
948 |
< |
|
949 |
< |
/** |
950 |
< |
* Returns an estimate of the number of threads that are currently |
951 |
< |
* idle waiting for tasks. This method may underestimate the |
952 |
< |
* number of idle threads. |
953 |
< |
* |
954 |
< |
* @return the number of idle threads |
955 |
< |
*/ |
956 |
< |
final int getIdleThreadCount() { |
957 |
< |
int c = runningCountOf(workerCounts) - activeCountOf(runControl); |
958 |
< |
return (c <= 0) ? 0 : c; |
1540 |
> |
return runState & ACTIVE_COUNT_MASK; |
1541 |
|
} |
1542 |
|
|
1543 |
|
/** |
1552 |
|
* @return {@code true} if all threads are currently idle |
1553 |
|
*/ |
1554 |
|
public boolean isQuiescent() { |
1555 |
< |
return activeCountOf(runControl) == 0; |
1555 |
> |
return (runState & ACTIVE_COUNT_MASK) == 0; |
1556 |
|
} |
1557 |
|
|
1558 |
|
/** |
1567 |
|
* @return the number of steals |
1568 |
|
*/ |
1569 |
|
public long getStealCount() { |
1570 |
< |
return stealCount.get(); |
989 |
< |
} |
990 |
< |
|
991 |
< |
/** |
992 |
< |
* Accumulates steal count from a worker. |
993 |
< |
* Call only when worker known to be idle. |
994 |
< |
*/ |
995 |
< |
private void updateStealCount(ForkJoinWorkerThread w) { |
996 |
< |
int sc = w.getAndClearStealCount(); |
997 |
< |
if (sc != 0) |
998 |
< |
stealCount.addAndGet(sc); |
1570 |
> |
return stealCount; |
1571 |
|
} |
1572 |
|
|
1573 |
|
/** |
1582 |
|
*/ |
1583 |
|
public long getQueuedTaskCount() { |
1584 |
|
long count = 0; |
1585 |
< |
ForkJoinWorkerThread[] ws = workers; |
1586 |
< |
if (ws != null) { |
1587 |
< |
for (int i = 0; i < ws.length; ++i) { |
1016 |
< |
ForkJoinWorkerThread t = ws[i]; |
1017 |
< |
if (t != null) |
1018 |
< |
count += t.getQueueSize(); |
1019 |
< |
} |
1585 |
> |
for (ForkJoinWorkerThread w : workers) { |
1586 |
> |
if (w != null) |
1587 |
> |
count += w.getQueueSize(); |
1588 |
|
} |
1589 |
|
return count; |
1590 |
|
} |
1640 |
|
*/ |
1641 |
|
protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) { |
1642 |
|
int n = submissionQueue.drainTo(c); |
1643 |
< |
ForkJoinWorkerThread[] ws = workers; |
1644 |
< |
if (ws != null) { |
1645 |
< |
for (int i = 0; i < ws.length; ++i) { |
1078 |
< |
ForkJoinWorkerThread w = ws[i]; |
1079 |
< |
if (w != null) |
1080 |
< |
n += w.drainTasksTo(c); |
1081 |
< |
} |
1643 |
> |
for (ForkJoinWorkerThread w : workers) { |
1644 |
> |
if (w != null) |
1645 |
> |
n += w.drainTasksTo(c); |
1646 |
|
} |
1647 |
|
return n; |
1648 |
|
} |
1655 |
|
* @return a string identifying this pool, as well as its state |
1656 |
|
*/ |
1657 |
|
public String toString() { |
1094 |
– |
int ps = parallelism; |
1095 |
– |
int wc = workerCounts; |
1096 |
– |
int rc = runControl; |
1658 |
|
long st = getStealCount(); |
1659 |
|
long qt = getQueuedTaskCount(); |
1660 |
|
long qs = getQueuedSubmissionCount(); |
1661 |
+ |
int wc = workerCounts; |
1662 |
+ |
int tc = wc >>> TOTAL_COUNT_SHIFT; |
1663 |
+ |
int rc = wc & RUNNING_COUNT_MASK; |
1664 |
+ |
int pc = parallelism; |
1665 |
+ |
int rs = runState; |
1666 |
+ |
int ac = rs & ACTIVE_COUNT_MASK; |
1667 |
|
return super.toString() + |
1668 |
< |
"[" + runStateToString(runStateOf(rc)) + |
1669 |
< |
", parallelism = " + ps + |
1670 |
< |
", size = " + totalCountOf(wc) + |
1671 |
< |
", active = " + activeCountOf(rc) + |
1672 |
< |
", running = " + runningCountOf(wc) + |
1668 |
> |
"[" + runLevelToString(rs) + |
1669 |
> |
", parallelism = " + pc + |
1670 |
> |
", size = " + tc + |
1671 |
> |
", active = " + ac + |
1672 |
> |
", running = " + rc + |
1673 |
|
", steals = " + st + |
1674 |
|
", tasks = " + qt + |
1675 |
|
", submissions = " + qs + |
1676 |
|
"]"; |
1677 |
|
} |
1678 |
|
|
1679 |
< |
private static String runStateToString(int rs) { |
1680 |
< |
switch (rs) { |
1681 |
< |
case RUNNING: return "Running"; |
1682 |
< |
case SHUTDOWN: return "Shutting down"; |
1683 |
< |
case TERMINATING: return "Terminating"; |
1117 |
< |
case TERMINATED: return "Terminated"; |
1118 |
< |
default: throw new Error("Unknown run state"); |
1119 |
< |
} |
1679 |
> |
private static String runLevelToString(int s) { |
1680 |
> |
return ((s & TERMINATED) != 0 ? "Terminated" : |
1681 |
> |
((s & TERMINATING) != 0 ? "Terminating" : |
1682 |
> |
((s & SHUTDOWN) != 0 ? "Shutting down" : |
1683 |
> |
"Running"))); |
1684 |
|
} |
1685 |
|
|
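runLevelToString above, and the shutdown/termination predicates below, mix ordered comparisons (runState >= SHUTDOWN) with bit tests ((s & TERMINATED) != 0). Both styles agree if run levels are one-way bits placed above a 16-bit active count, each later level a higher bit. The layout below is an assumption consistent with those tests, not something shown in this diff (and the sketch uses AtomicInteger where the pool uses a volatile field with Unsafe CAS):

    import java.util.concurrent.atomic.AtomicInteger;

    class RunLevels {
        static final int ACTIVE_COUNT_MASK = (1 << 16) - 1; // low bits: active count
        static final int SHUTDOWN          = 1 << 16;       // assumed bit positions
        static final int TERMINATING       = 1 << 17;
        static final int TERMINATED        = 1 << 18;

        final AtomicInteger runState = new AtomicInteger();

        void advanceRunLevel(int level) {     // one-way: level bits only ever set
            int s;
            do { s = runState.get(); } while (!runState.compareAndSet(s, s | level));
        }
        boolean isShutdown()   { return runState.get() >= SHUTDOWN; }
        boolean isTerminated() { return runState.get() >= TERMINATED; }
    }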
1122 |
– |
// lifecycle control |
1123 |
– |
|
1686 |
|
/** |
1687 |
|
* Initiates an orderly shutdown in which previously submitted |
1688 |
|
* tasks are executed, but no new tasks will be accepted. |
1697 |
|
*/ |
1698 |
|
public void shutdown() { |
1699 |
|
checkPermission(); |
1700 |
< |
transitionRunStateTo(SHUTDOWN); |
1701 |
< |
if (canTerminateOnShutdown(runControl)) { |
1140 |
< |
if (workers == null) { // shutting down before workers created |
1141 |
< |
final ReentrantLock lock = this.workerLock; |
1142 |
< |
lock.lock(); |
1143 |
< |
try { |
1144 |
< |
if (workers == null) { |
1145 |
< |
terminate(); |
1146 |
< |
transitionRunStateTo(TERMINATED); |
1147 |
< |
termination.signalAll(); |
1148 |
< |
} |
1149 |
< |
} finally { |
1150 |
< |
lock.unlock(); |
1151 |
< |
} |
1152 |
< |
} |
1153 |
< |
terminateOnShutdown(); |
1154 |
< |
} |
1700 |
> |
advanceRunLevel(SHUTDOWN); |
1701 |
> |
tryTerminate(false); |
1702 |
|
} |
1703 |
|
|
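From the caller's side, the usual two-phase idiom built on these lifecycle methods (hypothetical helper class):

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.TimeUnit;

    class PoolShutdown {
        static void shutdownAndWait(ForkJoinPool pool) throws InterruptedException {
            pool.shutdown();                                   // stop new submissions
            if (!pool.awaitTermination(60, TimeUnit.SECONDS))  // bounded wait
                pool.shutdownNow();                            // cancel stragglers
        }
    }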
1704 |
|
/** |
1719 |
|
*/ |
1720 |
|
public List<Runnable> shutdownNow() { |
1721 |
|
checkPermission(); |
1722 |
< |
terminate(); |
1722 |
> |
tryTerminate(true); |
1723 |
|
return Collections.emptyList(); |
1724 |
|
} |
1725 |
|
|
1729 |
|
* @return {@code true} if all tasks have completed following shut down |
1730 |
|
*/ |
1731 |
|
public boolean isTerminated() { |
1732 |
< |
return runStateOf(runControl) == TERMINATED; |
1732 |
> |
return runState >= TERMINATED; |
1733 |
|
} |
1734 |
|
|
1735 |
|
/** |
1743 |
|
* @return {@code true} if terminating but not yet terminated |
1744 |
|
*/ |
1745 |
|
public boolean isTerminating() { |
1746 |
< |
return runStateOf(runControl) == TERMINATING; |
1746 |
> |
return (runState & (TERMINATING|TERMINATED)) == TERMINATING; |
1747 |
|
} |
1748 |
|
|
1749 |
|
/** |
1752 |
|
* @return {@code true} if this pool has been shut down |
1753 |
|
*/ |
1754 |
|
public boolean isShutdown() { |
1755 |
< |
return runStateOf(runControl) >= SHUTDOWN; |
1209 |
< |
} |
1210 |
< |
|
1211 |
< |
/** |
1212 |
< |
* Returns true if pool is not terminating or terminated. |
1213 |
< |
* Used internally to suppress execution when terminating. |
1214 |
< |
*/ |
1215 |
< |
final boolean isProcessingTasks() { |
1216 |
< |
return runStateOf(runControl) < TERMINATING; |
1755 |
> |
return runState >= SHUTDOWN; |
1756 |
|
} |
1757 |
|
|
1758 |
|
/** |
1768 |
|
*/ |
1769 |
|
public boolean awaitTermination(long timeout, TimeUnit unit) |
1770 |
|
throws InterruptedException { |
1771 |
< |
long nanos = unit.toNanos(timeout); |
1233 |
< |
final ReentrantLock lock = this.workerLock; |
1234 |
< |
lock.lock(); |
1235 |
< |
try { |
1236 |
< |
for (;;) { |
1237 |
< |
if (isTerminated()) |
1238 |
< |
return true; |
1239 |
< |
if (nanos <= 0) |
1240 |
< |
return false; |
1241 |
< |
nanos = termination.awaitNanos(nanos); |
1242 |
< |
} |
1243 |
< |
} finally { |
1244 |
< |
lock.unlock(); |
1245 |
< |
} |
1246 |
< |
} |
1247 |
< |
|
1248 |
< |
// Shutdown and termination support |
1249 |
< |
|
1250 |
< |
/** |
1251 |
< |
* Callback from terminating worker. Nulls out the corresponding |
1252 |
< |
* workers slot, and if terminating, tries to terminate; else |
1253 |
< |
* tries to shrink workers array. |
1254 |
< |
* |
1255 |
< |
* @param w the worker |
1256 |
< |
*/ |
1257 |
< |
final void workerTerminated(ForkJoinWorkerThread w) { |
1258 |
< |
updateStealCount(w); |
1259 |
< |
updateWorkerCount(-1); |
1260 |
< |
final ReentrantLock lock = this.workerLock; |
1261 |
< |
lock.lock(); |
1262 |
< |
try { |
1263 |
< |
ForkJoinWorkerThread[] ws = workers; |
1264 |
< |
if (ws != null) { |
1265 |
< |
int idx = w.poolIndex; |
1266 |
< |
if (idx >= 0 && idx < ws.length && ws[idx] == w) |
1267 |
< |
ws[idx] = null; |
1268 |
< |
if (totalCountOf(workerCounts) == 0) { |
1269 |
< |
terminate(); // no-op if already terminating |
1270 |
< |
transitionRunStateTo(TERMINATED); |
1271 |
< |
termination.signalAll(); |
1272 |
< |
} |
1273 |
< |
else if (isProcessingTasks()) { |
1274 |
< |
tryShrinkWorkerArray(); |
1275 |
< |
tryResumeSpare(true); // allow replacement |
1276 |
< |
} |
1277 |
< |
} |
1278 |
< |
} finally { |
1279 |
< |
lock.unlock(); |
1280 |
< |
} |
1281 |
< |
signalIdleWorkers(); |
1282 |
< |
} |
1283 |
< |
|
1284 |
< |
/** |
1285 |
< |
* Initiates termination. |
1286 |
< |
*/ |
1287 |
< |
private void terminate() { |
1288 |
< |
if (transitionRunStateTo(TERMINATING)) { |
1289 |
< |
stopAllWorkers(); |
1290 |
< |
resumeAllSpares(); |
1291 |
< |
signalIdleWorkers(); |
1292 |
< |
cancelQueuedSubmissions(); |
1293 |
< |
cancelQueuedWorkerTasks(); |
1294 |
< |
interruptUnterminatedWorkers(); |
1295 |
< |
signalIdleWorkers(); // resignal after interrupt |
1296 |
< |
} |
1297 |
< |
} |
1298 |
< |
|
1299 |
< |
/** |
1300 |
< |
* Possibly terminates when in shutdown state. |
1301 |
< |
*/ |
1302 |
< |
private void terminateOnShutdown() { |
1303 |
< |
if (!hasQueuedSubmissions() && canTerminateOnShutdown(runControl)) |
1304 |
< |
terminate(); |
1305 |
< |
} |
1306 |
< |
|
1307 |
< |
/** |
1308 |
< |
* Clears out and cancels submissions. |
1309 |
< |
*/ |
1310 |
< |
private void cancelQueuedSubmissions() { |
1311 |
< |
ForkJoinTask<?> task; |
1312 |
< |
while ((task = pollSubmission()) != null) |
1313 |
< |
task.cancel(false); |
1314 |
< |
} |
1315 |
< |
|
1316 |
< |
/** |
1317 |
< |
* Cleans out worker queues. |
1318 |
< |
*/ |
1319 |
< |
private void cancelQueuedWorkerTasks() { |
1320 |
< |
final ReentrantLock lock = this.workerLock; |
1321 |
< |
lock.lock(); |
1322 |
< |
try { |
1323 |
< |
ForkJoinWorkerThread[] ws = workers; |
1324 |
< |
if (ws != null) { |
1325 |
< |
for (int i = 0; i < ws.length; ++i) { |
1326 |
< |
ForkJoinWorkerThread t = ws[i]; |
1327 |
< |
if (t != null) |
1328 |
< |
t.cancelTasks(); |
1329 |
< |
} |
1330 |
< |
} |
1331 |
< |
} finally { |
1332 |
< |
lock.unlock(); |
1333 |
< |
} |
1334 |
< |
} |
1335 |
< |
|
1336 |
< |
/** |
1337 |
< |
* Sets each worker's status to terminating. Requires lock to avoid |
1338 |
< |
* conflicts with add/remove. |
1339 |
< |
*/ |
1340 |
< |
private void stopAllWorkers() { |
1341 |
< |
final ReentrantLock lock = this.workerLock; |
1342 |
< |
lock.lock(); |
1343 |
< |
try { |
1344 |
< |
ForkJoinWorkerThread[] ws = workers; |
1345 |
< |
if (ws != null) { |
1346 |
< |
for (int i = 0; i < ws.length; ++i) { |
1347 |
< |
ForkJoinWorkerThread t = ws[i]; |
1348 |
< |
if (t != null) |
1349 |
< |
t.shutdownNow(); |
1350 |
< |
} |
1351 |
< |
} |
1352 |
< |
} finally { |
1353 |
< |
lock.unlock(); |
1354 |
< |
} |
1355 |
< |
} |
1356 |
< |
|
1357 |
< |
/** |
1358 |
< |
* Interrupts all unterminated workers. This is not required for |
1359 |
< |
* the sake of internal control, but may help unstick user code during |
1360 |
< |
* shutdown. |
1361 |
< |
*/ |
1362 |
< |
private void interruptUnterminatedWorkers() { |
1363 |
< |
final ReentrantLock lock = this.workerLock; |
1364 |
< |
lock.lock(); |
1365 |
< |
try { |
1366 |
< |
ForkJoinWorkerThread[] ws = workers; |
1367 |
< |
if (ws != null) { |
1368 |
< |
for (int i = 0; i < ws.length; ++i) { |
1369 |
< |
ForkJoinWorkerThread t = ws[i]; |
1370 |
< |
if (t != null && !t.isTerminated()) { |
1371 |
< |
try { |
1372 |
< |
t.interrupt(); |
1373 |
< |
} catch (SecurityException ignore) { |
1374 |
< |
} |
1375 |
< |
} |
1376 |
< |
} |
1377 |
< |
} |
1378 |
< |
} finally { |
1379 |
< |
lock.unlock(); |
1380 |
< |
} |
1381 |
< |
} |
1382 |
< |
|
1383 |
< |
/* |
1384 |
< |
* Nodes for event barrier to manage idle threads. Queue nodes |
1385 |
< |
* are basic Treiber stack nodes, also used for spare stack. |
1386 |
< |
* |
1387 |
< |
* The event barrier has an event count and a wait queue (actually |
1388 |
< |
* a Treiber stack). Workers are enabled to look for work when |
1389 |
< |
* the eventCount is incremented. If they fail to find work, they |
1390 |
< |
* may wait for next count. Upon release, threads help others wake |
1391 |
< |
* up. |
1392 |
< |
* |
1393 |
< |
* Synchronization events occur only in enough contexts to |
1394 |
< |
* maintain overall liveness: |
1395 |
< |
* |
1396 |
< |
* - Submission of a new task to the pool |
1397 |
< |
* - Resizes or other changes to the workers array |
1398 |
< |
* - Pool termination |
1399 |
< |
* - A worker pushing a task on an empty queue |
1400 |
< |
* |
1401 |
< |
* The case of pushing a task occurs often enough, and is heavy |
1402 |
< |
* enough compared to simple stack pushes, to require special |
1403 |
< |
* handling: Method signalWork returns without advancing count if |
1404 |
< |
* the queue appears to be empty. This would ordinarily result in |
1405 |
< |
* races causing some queued waiters not to be woken up. To avoid |
1406 |
< |
* this, the first worker enqueued in method sync rescans for |
1407 |
< |
* tasks after being enqueued, and helps signal if any are |
1408 |
< |
* found. This works well because the worker has nothing better to |
1409 |
< |
* do, and so might as well help alleviate the overhead and |
1410 |
< |
* contention on the threads actually doing work. Also, since |
1411 |
< |
* event count increments on task availability exist to maintain |
1412 |
< |
* liveness (rather than to force refreshes etc), it is OK for |
1413 |
< |
* callers to exit early if contending with another signaller. |
1414 |
< |
*/ |
1415 |
< |
static final class WaitQueueNode { |
1416 |
< |
WaitQueueNode next; // only written before enqueued |
1417 |
< |
volatile ForkJoinWorkerThread thread; // nulled to cancel wait |
1418 |
< |
final long count; // unused for spare stack |
1419 |
< |
|
1420 |
< |
WaitQueueNode(long c, ForkJoinWorkerThread w) { |
1421 |
< |
count = c; |
1422 |
< |
thread = w; |
1423 |
< |
} |
1424 |
< |
|
1425 |
< |
/** |
1426 |
< |
* Wakes up waiter, also clearing thread field |
1427 |
< |
*/ |
1428 |
< |
void signal() { |
1429 |
< |
ForkJoinWorkerThread t = thread; |
1430 |
< |
if (t != null) { |
1431 |
< |
thread = null; |
1432 |
< |
LockSupport.unpark(t); |
1433 |
< |
} |
1434 |
< |
} |
1435 |
< |
} |
1436 |
< |
|
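The WaitQueueNode stacks described above are Treiber stacks: push by CAS on the head, and release everyone by swapping the whole head to null. A minimal generic sketch using AtomicReference in place of the pool's raw CAS (hypothetical class):

    import java.util.concurrent.atomic.AtomicReference;

    class TreiberStack<T> {
        static final class Node<T> {
            final T item;
            Node<T> next;                        // written only before push
            Node(T item) { this.item = item; }
        }
        private final AtomicReference<Node<T>> top =
            new AtomicReference<Node<T>>();

        void push(T item) {
            Node<T> n = new Node<T>(item);
            do { n.next = top.get(); } while (!top.compareAndSet(n.next, n));
        }

        Node<T> grabAll() {                      // as in ensureSync/resumeAllSpares
            Node<T> h;
            do { h = top.get(); } while (h != null && !top.compareAndSet(h, null));
            return h;                            // caller walks .next, signalling each
        }
    }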
1437 |
< |
/** |
1438 |
< |
* Ensures that no thread is waiting for count to advance from the |
1439 |
< |
* current value of eventCount read on entry to this method, by |
1440 |
< |
* releasing waiting threads if necessary. |
1441 |
< |
*/ |
1442 |
< |
final void ensureSync() { |
1443 |
< |
long c = eventCount; |
1444 |
< |
WaitQueueNode q; |
1445 |
< |
while ((q = syncStack) != null && q.count < c) { |
1446 |
< |
if (casBarrierStack(q, null)) { |
1447 |
< |
do { |
1448 |
< |
q.signal(); |
1449 |
< |
} while ((q = q.next) != null); |
1450 |
< |
break; |
1451 |
< |
} |
1452 |
< |
} |
1453 |
< |
} |
1454 |
< |
|
1455 |
< |
/** |
1456 |
< |
* Increments event count and releases waiting threads. |
1457 |
< |
*/ |
1458 |
< |
private void signalIdleWorkers() { |
1459 |
< |
long c; |
1460 |
< |
do {} while (!casEventCount(c = eventCount, c+1)); |
1461 |
< |
ensureSync(); |
1462 |
< |
} |
1463 |
< |
|
1464 |
< |
/** |
1465 |
< |
* Signals threads waiting to poll a task. Because method sync |
1466 |
< |
* rechecks availability, it is OK to only proceed if queue |
1467 |
< |
* appears to be non-empty, and OK if CAS to increment count |
1468 |
< |
* fails (since some other thread succeeded). |
1469 |
< |
*/ |
1470 |
< |
final void signalWork() { |
1471 |
< |
if (syncStack != null) { |
1472 |
< |
long c = eventCount; |
1473 |
< |
casEventCount(c, c+1); |
1474 |
< |
WaitQueueNode q = syncStack; |
1475 |
< |
if (q != null && q.count <= c) { |
1476 |
< |
if (casBarrierStack(q, q.next)) |
1477 |
< |
q.signal(); |
1478 |
< |
else |
1479 |
< |
ensureSync(); // awaken all on contention |
1480 |
< |
} |
1481 |
< |
} |
1482 |
< |
} |
1483 |
< |
|
1484 |
< |
/** |
1485 |
< |
* Possibly blocks until event count advances from last value held |
1486 |
< |
* by caller, or if excess threads, caller is resumed as spare, or |
1487 |
< |
* caller or pool is terminating. Updates caller's event on exit. |
1488 |
< |
* |
1489 |
< |
* @param w the calling worker thread |
1490 |
< |
*/ |
1491 |
< |
final void sync(ForkJoinWorkerThread w) { |
1492 |
< |
updateStealCount(w); // Transfer w's count while it is idle |
1493 |
< |
|
1494 |
< |
if (!w.isShutdown() && isProcessingTasks() && !suspendIfSpare(w)) { |
1495 |
< |
long prev = w.lastEventCount; |
1496 |
< |
WaitQueueNode node = null; |
1497 |
< |
WaitQueueNode h; |
1498 |
< |
long c; |
1499 |
< |
while ((c = eventCount) == prev && |
1500 |
< |
((h = syncStack) == null || h.count == prev)) { |
1501 |
< |
if (node == null) |
1502 |
< |
node = new WaitQueueNode(prev, w); |
1503 |
< |
if (casBarrierStack(node.next = h, node)) { |
1504 |
< |
if (!Thread.interrupted() && |
1505 |
< |
node.thread != null && |
1506 |
< |
eventCount == prev && |
1507 |
< |
(h != null || // cover signalWork race |
1508 |
< |
(!ForkJoinWorkerThread.hasQueuedTasks(workers) && |
1509 |
< |
eventCount == prev))) |
1510 |
< |
LockSupport.park(this); |
1511 |
< |
c = eventCount; |
1512 |
< |
if (node.thread != null) { // help signal if not unparked |
1513 |
< |
node.thread = null; |
1514 |
< |
if (c == prev) |
1515 |
< |
casEventCount(prev, prev + 1); |
1516 |
< |
} |
1517 |
< |
break; |
1518 |
< |
} |
1519 |
< |
} |
1520 |
< |
w.lastEventCount = c; |
1521 |
< |
ensureSync(); |
1522 |
< |
} |
1523 |
< |
} |
1524 |
< |
|
1525 |
< |
/** |
1526 |
< |
* Returns {@code true} if a new sync event occurred since last |
1527 |
< |
* call to sync or this method, updating the caller's count if so. |
1528 |
< |
*/ |
1529 |
< |
final boolean hasNewSyncEvent(ForkJoinWorkerThread w) { |
1530 |
< |
long wc = w.lastEventCount; |
1531 |
< |
long c = eventCount; |
1532 |
< |
if (wc != c) |
1533 |
< |
w.lastEventCount = c; |
1534 |
< |
ensureSync(); |
1535 |
< |
return wc != c || wc != eventCount; |
1536 |
< |
} |
1537 |
< |
|
1538 |
< |
// Parallelism maintenance |
1539 |
< |
|
1540 |
< |
/** |
1541 |
< |
* Decrements running count; if too low, adds spare. |
1542 |
< |
* |
1543 |
< |
* Conceptually, all we need to do here is add or resume a |
1544 |
< |
* spare thread when one is about to block (and remove or |
1545 |
< |
* suspend it later when unblocked -- see suspendIfSpare). |
1546 |
< |
* However, implementing this idea requires coping with |
1547 |
< |
* several problems: we have imperfect information about the |
1548 |
< |
* states of threads. Some count updates can and usually do |
1549 |
< |
* lag run state changes, despite arrangements to keep them |
1550 |
< |
* accurate (for example, when possible, updating counts |
1551 |
< |
* before signalling or resuming), especially when running on |
1552 |
< |
* dynamic JVMs that don't optimize the infrequent paths that |
1553 |
< |
* update counts. Generating too many threads can make these |
1554 |
< |
* problems worse, because excess threads are more |
1555 |
< |
* likely to be context-switched with others, slowing them all |
1556 |
< |
* down, especially if there is no work available, so all are |
1557 |
< |
* busy scanning or idling. Also, excess spare threads can |
1558 |
< |
* only be suspended or removed when they are idle, not |
1559 |
< |
* immediately when they aren't needed. So adding threads will |
1560 |
< |
* raise parallelism level for longer than necessary. Also, |
1561 |
< |
* FJ applications often encounter highly transient peaks when |
1562 |
< |
* many threads are blocked joining, but for less time than it |
1563 |
< |
* takes to create or resume spares. |
1564 |
< |
* |
1565 |
< |
* @param joinMe if non-null, return early if done |
1566 |
< |
* @param maintainParallelism if true, try to stay within |
1567 |
< |
* target counts, else create only to avoid starvation |
1568 |
< |
* @return true if joinMe known to be done |
1569 |
< |
*/ |
1570 |
< |
final boolean preJoin(ForkJoinTask<?> joinMe, |
1571 |
< |
boolean maintainParallelism) { |
1572 |
< |
maintainParallelism &= maintainsParallelism; // override |
1573 |
< |
boolean dec = false; // true when running count decremented |
1574 |
< |
while (spareStack == null || !tryResumeSpare(dec)) { |
1575 |
< |
int counts = workerCounts; |
1576 |
< |
if (dec || (dec = casWorkerCounts(counts, --counts))) { |
1577 |
< |
if (!needSpare(counts, maintainParallelism)) |
1578 |
< |
break; |
1579 |
< |
if (joinMe.status < 0) |
1580 |
< |
return true; |
1581 |
< |
if (tryAddSpare(counts)) |
1582 |
< |
break; |
1583 |
< |
} |
1584 |
< |
} |
1585 |
< |
return false; |
1586 |
< |
} |
1587 |
< |
|
1588 |
< |
/** |
1589 |
< |
* Same idea as preJoin, applied to a ManagedBlocker instead of a join. |
1590 |
< |
*/ |
1591 |
< |
final boolean preBlock(ManagedBlocker blocker, |
1592 |
< |
boolean maintainParallelism) { |
1593 |
< |
maintainParallelism &= maintainsParallelism; |
1594 |
< |
boolean dec = false; |
1595 |
< |
while (spareStack == null || !tryResumeSpare(dec)) { |
1596 |
< |
int counts = workerCounts; |
1597 |
< |
if (dec || (dec = casWorkerCounts(counts, --counts))) { |
1598 |
< |
if (!needSpare(counts, maintainParallelism)) |
1599 |
< |
break; |
1600 |
< |
if (blocker.isReleasable()) |
1601 |
< |
return true; |
1602 |
< |
if (tryAddSpare(counts)) |
1603 |
< |
break; |
1604 |
< |
} |
1605 |
< |
} |
1606 |
< |
return false; |
1607 |
< |
} |
1608 |
< |
|
1609 |
< |
/** |
1610 |
< |
* Returns {@code true} if a spare thread appears to be needed. |
1611 |
< |
* If maintaining parallelism, returns true when the deficit in |
1612 |
< |
* running threads is more than the surplus of total threads, and |
1613 |
< |
* there is apparently some work to do. This self-limiting rule |
1614 |
< |
* means that the more threads that have already been added, the |
1615 |
< |
* less parallelism we will tolerate before adding another. |
1616 |
< |
* |
1617 |
< |
* @param counts current worker counts |
1618 |
< |
* @param maintainParallelism try to maintain parallelism |
1619 |
< |
*/ |
1620 |
< |
private boolean needSpare(int counts, boolean maintainParallelism) { |
1621 |
< |
int ps = parallelism; |
1622 |
< |
int rc = runningCountOf(counts); |
1623 |
< |
int tc = totalCountOf(counts); |
1624 |
< |
int runningDeficit = ps - rc; |
1625 |
< |
int totalSurplus = tc - ps; |
1626 |
< |
return (tc < maxPoolSize && |
1627 |
< |
(rc == 0 || totalSurplus < 0 || |
1628 |
< |
(maintainParallelism && |
1629 |
< |
runningDeficit > totalSurplus && |
1630 |
< |
ForkJoinWorkerThread.hasQueuedTasks(workers)))); |
1631 |
< |
} |
1632 |
< |
|
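The self-limiting rule in needSpare, stated as arithmetic: with target parallelism ps, running count rc, and total count tc, a spare is added only while the running deficit exceeds the total surplus, i.e. (ps - rc) > (tc - ps), equivalently rc + tc < 2 * ps. For example, with ps = 8: tc = 10 and rc = 5 gives deficit 3 > surplus 2, so a spare may be added; tc = 12 and rc = 5 gives deficit 3 < surplus 4, so it is not. The more threads already added, the smaller the tolerated deficit.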
1633 |
< |
/** |
1634 |
< |
* Adds a spare worker if lock available and no more than the |
1635 |
< |
* expected numbers of threads exist. |
1636 |
< |
* |
1637 |
< |
* @return true if successful |
1638 |
< |
*/ |
1639 |
< |
private boolean tryAddSpare(int expectedCounts) { |
1640 |
< |
final ReentrantLock lock = this.workerLock; |
1641 |
< |
int expectedRunning = runningCountOf(expectedCounts); |
1642 |
< |
int expectedTotal = totalCountOf(expectedCounts); |
1643 |
< |
boolean success = false; |
1644 |
< |
boolean locked = false; |
1645 |
< |
// confirm counts while locking; CAS after obtaining lock |
1646 |
< |
try { |
1647 |
< |
for (;;) { |
1648 |
< |
int s = workerCounts; |
1649 |
< |
int tc = totalCountOf(s); |
1650 |
< |
int rc = runningCountOf(s); |
1651 |
< |
if (rc > expectedRunning || tc > expectedTotal) |
1652 |
< |
break; |
1653 |
< |
if (!locked && !(locked = lock.tryLock())) |
1654 |
< |
break; |
1655 |
< |
if (casWorkerCounts(s, workerCountsFor(tc+1, rc+1))) { |
1656 |
< |
createAndStartSpare(tc); |
1657 |
< |
success = true; |
1658 |
< |
break; |
1659 |
< |
} |
1660 |
< |
} |
1661 |
< |
} finally { |
1662 |
< |
if (locked) |
1663 |
< |
lock.unlock(); |
1664 |
< |
} |
1665 |
< |
return success; |
1666 |
< |
} |
1667 |
< |
|
1668 |
< |
/** |
1669 |
< |
* Adds the kth spare worker. On entry, pool counts are already |
1670 |
< |
* adjusted to reflect addition. |
1671 |
< |
*/ |
1672 |
< |
private void createAndStartSpare(int k) { |
1673 |
< |
ForkJoinWorkerThread w = null; |
1674 |
< |
ForkJoinWorkerThread[] ws = ensureWorkerArrayCapacity(k + 1); |
1675 |
< |
int len = ws.length; |
1676 |
< |
// Probably, we can place at slot k. If not, find empty slot |
1677 |
< |
if (k < len && ws[k] != null) { |
1678 |
< |
for (k = 0; k < len && ws[k] != null; ++k) |
1679 |
< |
; |
1680 |
< |
} |
1681 |
< |
if (k < len && isProcessingTasks() && (w = createWorker(k)) != null) { |
1682 |
< |
ws[k] = w; |
1683 |
< |
w.start(); |
1684 |
< |
} |
1685 |
< |
else |
1686 |
< |
updateWorkerCount(-1); // adjust on failure |
1687 |
< |
signalIdleWorkers(); |
1688 |
< |
} |
1689 |
< |
|
1690 |
< |
/** |
1691 |
< |
* Suspends calling thread w if there are excess threads. Called |
1692 |
< |
* only from sync. Spares are enqueued in a Treiber stack using |
1693 |
< |
* the same WaitQueueNodes as barriers. They are resumed mainly |
1694 |
< |
* in preJoin, but are also woken on pool events that require all |
1695 |
< |
* threads to check run state. |
1696 |
< |
* |
1697 |
< |
* @param w the caller |
1698 |
< |
*/ |
1699 |
< |
private boolean suspendIfSpare(ForkJoinWorkerThread w) { |
1700 |
< |
WaitQueueNode node = null; |
1701 |
< |
for (;;) { |
1702 |
< |
int s = workerCounts; |
1703 |
< |
int rc = runningCountOf(s); |
1704 |
< |
int tc = totalCountOf(s); |
1705 |
< |
int ps = parallelism; |
1706 |
< |
// use tc as bound if rc transiently out of sync |
1707 |
< |
if (tc <= ps || rc <= ps) |
1708 |
< |
return false; // not a spare |
1709 |
< |
if (node == null) |
1710 |
< |
node = new WaitQueueNode(0, w); |
1711 |
< |
if (casWorkerCounts(s, workerCountsFor(tc, rc - 1))) |
1712 |
< |
break; |
1713 |
< |
} |
1714 |
< |
// push onto stack |
1715 |
< |
do {} while (!casSpareStack(node.next = spareStack, node)); |
1716 |
< |
// block until released by resumeSpare |
1717 |
< |
while (!Thread.interrupted() && node.thread != null) |
1718 |
< |
LockSupport.park(this); |
1719 |
< |
return true; |
1720 |
< |
} |
1721 |
< |
|
1722 |
< |
/** |
1723 |
< |
* Tries to pop and resume a spare thread. |
1724 |
< |
* |
1725 |
< |
* @param updateCount if true, increment running count on success |
1726 |
< |
* @return true if successful |
1727 |
< |
*/ |
1728 |
< |
private boolean tryResumeSpare(boolean updateCount) { |
1729 |
< |
WaitQueueNode q; |
1730 |
< |
while ((q = spareStack) != null) { |
1731 |
< |
if (casSpareStack(q, q.next)) { |
1732 |
< |
if (updateCount) |
1733 |
< |
updateRunningCount(1); |
1734 |
< |
q.signal(); |
1735 |
< |
return true; |
1736 |
< |
} |
1737 |
< |
} |
1738 |
< |
return false; |
1739 |
< |
} |
1740 |
< |
|
1741 |
< |
/** |
1742 |
< |
* Pops and resumes all spare threads. Same idea as ensureSync. |
1743 |
< |
* |
1744 |
< |
* @return true if any spares released |
1745 |
< |
*/ |
1746 |
< |
private boolean resumeAllSpares() { |
1747 |
< |
WaitQueueNode q; |
1748 |
< |
while ((q = spareStack) != null) { |
1749 |
< |
if (casSpareStack(q, null)) { |
1750 |
< |
do { |
1751 |
< |
updateRunningCount(1); |
1752 |
< |
q.signal(); |
1753 |
< |
} while ((q = q.next) != null); |
1754 |
< |
return true; |
1755 |
< |
} |
1756 |
< |
} |
1757 |
< |
return false; |
1758 |
< |
} |
1759 |
< |
|
1760 |
< |
/** |
1761 |
< |
* Pops and shuts down excessive spare threads. Call only while |
1762 |
< |
* holding lock. This is not guaranteed to eliminate all excess |
1763 |
< |
* threads, only those suspended as spares, which are the ones |
1764 |
< |
* unlikely to be needed in the future. |
1765 |
< |
*/ |
1766 |
< |
private void trimSpares() { |
1767 |
< |
int surplus = totalCountOf(workerCounts) - parallelism; |
1768 |
< |
WaitQueueNode q; |
1769 |
< |
while (surplus > 0 && (q = spareStack) != null) { |
1770 |
< |
if (casSpareStack(q, null)) { |
1771 |
< |
do { |
1772 |
< |
updateRunningCount(1); |
1773 |
< |
ForkJoinWorkerThread w = q.thread; |
1774 |
< |
if (w != null && surplus > 0 && |
1775 |
< |
runningCountOf(workerCounts) > 0 && w.shutdown()) |
1776 |
< |
--surplus; |
1777 |
< |
q.signal(); |
1778 |
< |
} while ((q = q.next) != null); |
1779 |
< |
} |
1780 |
< |
} |
1771 |
> |
return terminationLatch.await(timeout, unit); |
1772 |
|
} |
1773 |
|
|
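The latch-based await replaces the removed lock-and-Condition loop: terminationLatch is created with count one in the constructor shown earlier, and whichever thread completes termination counts it down, releasing every waiter at once. A sketch of the signalling side (method name hypothetical):

    final CountDownLatch terminationLatch = new CountDownLatch(1);

    void signalTerminated() {             // invoked once, on reaching TERMINATED
        terminationLatch.countDown();     // wakes all awaitTermination callers
    }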
1774 |
|
/** |
1851 |
|
boolean maintainParallelism) |
1852 |
|
throws InterruptedException { |
1853 |
|
Thread t = Thread.currentThread(); |
1854 |
< |
ForkJoinPool pool = ((t instanceof ForkJoinWorkerThread) ? |
1855 |
< |
((ForkJoinWorkerThread) t).pool : null); |
1856 |
< |
if (!blocker.isReleasable()) { |
1857 |
< |
try { |
1858 |
< |
if (pool == null || |
1868 |
< |
!pool.preBlock(blocker, maintainParallelism)) |
1869 |
< |
awaitBlocker(blocker); |
1870 |
< |
} finally { |
1871 |
< |
if (pool != null) |
1872 |
< |
pool.updateRunningCount(1); |
1873 |
< |
} |
1874 |
< |
} |
1854 |
> |
if (t instanceof ForkJoinWorkerThread) |
1855 |
> |
((ForkJoinWorkerThread) t).pool. |
1856 |
> |
doBlock(blocker, maintainParallelism); |
1857 |
> |
else |
1858 |
> |
awaitBlocker(blocker); |
1859 |
|
} |
1860 |
|
|
1861 |
+ |
/** |
1862 |
+ |
* Performs non-FJ blocking. |
1863 |
+ |
*/ |
1864 |
|
private static void awaitBlocker(ManagedBlocker blocker) |
1865 |
|
throws InterruptedException { |
1866 |
|
do {} while (!blocker.isReleasable() && !blocker.block()); |
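A typical ManagedBlocker implementation for the two-argument managedBlock shown above, for illustration (a queue-taker in the style the interface is designed for; class and field names hypothetical):

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.ForkJoinPool;

    class QueueTaker<E> implements ForkJoinPool.ManagedBlocker {
        final BlockingQueue<E> queue;
        volatile E item;
        QueueTaker(BlockingQueue<E> q) { queue = q; }
        public boolean block() throws InterruptedException {
            if (item == null)
                item = queue.take();        // really blocks
            return true;
        }
        public boolean isReleasable() {     // avoid blocking when possible
            return item != null || (item = queue.poll()) != null;
        }
    }
    // usage: ForkJoinPool.managedBlock(taker, true); then read taker.item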
1881 |
|
// Unsafe mechanics |
1882 |
|
|
1883 |
|
private static final sun.misc.Unsafe UNSAFE = getUnsafe(); |
1897 |
– |
private static final long eventCountOffset = |
1898 |
– |
objectFieldOffset("eventCount", ForkJoinPool.class); |
1884 |
|
private static final long workerCountsOffset = |
1885 |
|
objectFieldOffset("workerCounts", ForkJoinPool.class); |
1886 |
< |
private static final long runControlOffset = |
1887 |
< |
objectFieldOffset("runControl", ForkJoinPool.class); |
1888 |
< |
private static final long syncStackOffset = |
1889 |
< |
objectFieldOffset("syncStack",ForkJoinPool.class); |
1890 |
< |
private static final long spareStackOffset = |
1891 |
< |
objectFieldOffset("spareStack", ForkJoinPool.class); |
1892 |
< |
|
1893 |
< |
private boolean casEventCount(long cmp, long val) { |
1894 |
< |
return UNSAFE.compareAndSwapLong(this, eventCountOffset, cmp, val); |
1910 |
< |
} |
1911 |
< |
private boolean casWorkerCounts(int cmp, int val) { |
1912 |
< |
return UNSAFE.compareAndSwapInt(this, workerCountsOffset, cmp, val); |
1913 |
< |
} |
1914 |
< |
private boolean casRunControl(int cmp, int val) { |
1915 |
< |
return UNSAFE.compareAndSwapInt(this, runControlOffset, cmp, val); |
1916 |
< |
} |
1917 |
< |
private boolean casSpareStack(WaitQueueNode cmp, WaitQueueNode val) { |
1918 |
< |
return UNSAFE.compareAndSwapObject(this, spareStackOffset, cmp, val); |
1919 |
< |
} |
1920 |
< |
private boolean casBarrierStack(WaitQueueNode cmp, WaitQueueNode val) { |
1921 |
< |
return UNSAFE.compareAndSwapObject(this, syncStackOffset, cmp, val); |
1922 |
< |
} |
1886 |
> |
private static final long runStateOffset = |
1887 |
> |
objectFieldOffset("runState", ForkJoinPool.class); |
1888 |
> |
private static final long eventCountOffset = |
1889 |
> |
objectFieldOffset("eventCount", ForkJoinPool.class); |
1890 |
> |
private static final long eventWaitersOffset = |
1891 |
> |
objectFieldOffset("eventWaiters",ForkJoinPool.class); |
1892 |
> |
private static final long stealCountOffset = |
1893 |
> |
objectFieldOffset("stealCount",ForkJoinPool.class); |
1894 |
> |
|
1895 |
|
|
1896 |
|
private static long objectFieldOffset(String field, Class<?> klazz) { |
1897 |
|
try { |