1 |
|
/* |
2 |
|
* Written by Doug Lea with assistance from members of JCP JSR-166 |
3 |
|
* Expert Group and released to the public domain, as explained at |
4 |
< |
* http://creativecommons.org/licenses/publicdomain |
4 |
> |
* http://creativecommons.org/publicdomain/zero/1.0/ |
5 |
|
*/ |
6 |
|
|
7 |
|
package jsr166y; |
19 |
|
import java.util.concurrent.RejectedExecutionException; |
20 |
|
import java.util.concurrent.RunnableFuture; |
21 |
|
import java.util.concurrent.TimeUnit; |
22 |
-
import java.util.concurrent.TimeoutException; |
22 |
|
import java.util.concurrent.atomic.AtomicInteger; |
23 |
< |
import java.util.concurrent.locks.LockSupport; |
23 |
> |
import java.util.concurrent.atomic.AtomicLong; |
24 |
|
import java.util.concurrent.locks.ReentrantLock; |
25 |
|
import java.util.concurrent.locks.Condition; |
26 |
|
|
33 |
|
* <p>A {@code ForkJoinPool} differs from other kinds of {@link |
34 |
|
* ExecutorService} mainly by virtue of employing |
35 |
|
* <em>work-stealing</em>: all threads in the pool attempt to find and |
36 |
< |
* execute subtasks created by other active tasks (eventually blocking |
37 |
< |
* waiting for work if none exist). This enables efficient processing |
38 |
< |
* when most tasks spawn other subtasks (as do most {@code |
39 |
< |
* ForkJoinTask}s). When setting <em>asyncMode</em> to true in |
40 |
< |
* constructors, {@code ForkJoinPool}s may also be appropriate for use |
41 |
< |
* with event-style tasks that are never joined. |
36 |
> |
* execute tasks submitted to the pool and/or created by other active |
37 |
> |
* tasks (eventually blocking waiting for work if none exist). This |
38 |
> |
* enables efficient processing when most tasks spawn other subtasks |
39 |
> |
* (as do most {@code ForkJoinTask}s), as well as when many small |
40 |
> |
* tasks are submitted to the pool from external clients. Especially |
41 |
> |
* when setting <em>asyncMode</em> to true in constructors, {@code |
42 |
> |
* ForkJoinPool}s may also be appropriate for use with event-style |
43 |
> |
* tasks that are never joined. |
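*
* <p>For instance, a pool meant for such event-style tasks might be
* built with <em>asyncMode</em> enabled (a usage sketch; it assumes the
* four-argument constructor taking a parallelism level, thread factory,
* handler, and asyncMode flag):
*
* <pre> {@code
* ForkJoinPool eventPool = new ForkJoinPool(
*     Runtime.getRuntime().availableProcessors(),
*     ForkJoinPool.defaultForkJoinWorkerThreadFactory,
*     null,    // use the default uncaught exception behavior
*     true);   // asyncMode: local FIFO scheduling, for tasks never joined
* }}</pre>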
44 |
|
* |
45 |
|
* <p>A {@code ForkJoinPool} is constructed with a given target |
46 |
|
* parallelism level; by default, equal to the number of available |
60 |
|
* convenient form for informal monitoring. |
61 |
|
* |
62 |
|
* <p> As is the case with other ExecutorServices, there are three |
63 |
< |
* main task execution methods summarized in the following |
64 |
< |
* table. These are designed to be used by clients not already engaged |
65 |
< |
* in fork/join computations in the current pool. The main forms of |
66 |
< |
* these methods accept instances of {@code ForkJoinTask}, but |
67 |
< |
* overloaded forms also allow mixed execution of plain {@code |
63 |
> |
* main task execution methods summarized in the following table. |
64 |
> |
* These are designed to be used primarily by clients not already |
65 |
> |
* engaged in fork/join computations in the current pool. The main |
66 |
> |
* forms of these methods accept instances of {@code ForkJoinTask}, |
67 |
> |
* but overloaded forms also allow mixed execution of plain {@code |
68 |
|
* Runnable}- or {@code Callable}- based activities as well. However, |
69 |
< |
* tasks that are already executing in a pool should normally |
70 |
< |
* <em>NOT</em> use these pool execution methods, but instead use the |
71 |
< |
* within-computation forms listed in the table. |
69 |
> |
* tasks that are already executing in a pool should normally instead |
70 |
> |
* use the within-computation forms listed in the table unless using |
71 |
> |
* async event-style tasks that are not usually joined, in which case |
72 |
> |
* there is little difference among choice of methods. |
73 |
|
* |
74 |
|
* <table BORDER CELLPADDING=3 CELLSPACING=1> |
75 |
|
* <tr> |
104 |
|
* daemon} mode, there is typically no need to explicitly {@link |
105 |
|
* #shutdown} such a pool upon program exit. |
106 |
|
* |
107 |
< |
* <pre> |
107 |
> |
* <pre> {@code |
108 |
|
* static final ForkJoinPool mainPool = new ForkJoinPool(); |
109 |
|
* ... |
110 |
|
* public void sort(long[] array) { |
111 |
|
* mainPool.invoke(new SortTask(array, 0, array.length)); |
112 |
< |
* } |
111 |
< |
* </pre> |
112 |
> |
* }}</pre> |
113 |
|
* |
114 |
|
* <p><b>Implementation notes</b>: This implementation restricts the |
115 |
|
* maximum number of running threads to 32767. Attempts to create |
128 |
|
/* |
129 |
|
* Implementation Overview |
130 |
|
* |
131 |
< |
* This class provides the central bookkeeping and control for a |
132 |
< |
* set of worker threads: Submissions from non-FJ threads enter |
133 |
< |
* into a submission queue. Workers take these tasks and typically |
134 |
< |
* split them into subtasks that may be stolen by other workers. |
135 |
< |
* Preference rules give first priority to processing tasks from |
136 |
< |
* their own queues (LIFO or FIFO, depending on mode), then to |
137 |
< |
* randomized FIFO steals of tasks in other worker queues, and |
138 |
< |
* lastly to new submissions. |
131 |
> |
* This class and its nested classes provide the main |
132 |
> |
* functionality and control for a set of worker threads: |
133 |
> |
* Submissions from non-FJ threads enter into submission queues. |
134 |
> |
* Workers take these tasks and typically split them into subtasks |
135 |
> |
* that may be stolen by other workers. Preference rules give |
136 |
> |
* first priority to processing tasks from their own queues (LIFO |
137 |
> |
* or FIFO, depending on mode), then to randomized FIFO steals of |
138 |
> |
* tasks in other queues. |
139 |
> |
* |
140 |
> |
* WorkQueues |
141 |
> |
* ========== |
142 |
> |
* |
143 |
> |
* Most operations occur within work-stealing queues (in nested |
144 |
> |
* class WorkQueue). These are special forms of Deques that |
145 |
> |
* support only three of the four possible end-operations -- push, |
146 |
> |
* pop, and poll (aka steal), under the further constraints that |
147 |
> |
* push and pop are called only from the owning thread (or, as |
148 |
> |
* extended here, under a lock), while poll may be called from |
149 |
> |
* other threads. (If you are unfamiliar with them, you probably |
150 |
> |
* want to read Herlihy and Shavit's book "The Art of |
151 |
> |
* Multiprocessor programming", chapter 16 describing these in |
152 |
> |
* more detail before proceeding.) The main work-stealing queue |
153 |
> |
* design is roughly similar to those in the papers "Dynamic |
154 |
> |
* Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005 |
155 |
> |
* (http://research.sun.com/scalable/pubs/index.html) and |
156 |
> |
* "Idempotent work stealing" by Michael, Saraswat, and Vechev, |
157 |
> |
* PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186). |
158 |
> |
* The main differences ultimately stem from GC requirements that |
159 |
> |
* we null out taken slots as soon as we can, to maintain as small |
160 |
> |
* a footprint as possible even in programs generating huge |
161 |
> |
* numbers of tasks. To accomplish this, we shift the CAS |
162 |
> |
* arbitrating pop vs poll (steal) from being on the indices |
163 |
> |
* ("base" and "top") to the slots themselves. So, both a |
164 |
> |
* successful pop and poll mainly entail a CAS of a slot from |
165 |
> |
* non-null to null. Because we rely on CASes of references, we |
166 |
> |
* do not need tag bits on base or top. They are simple ints as |
167 |
> |
* used in any circular array-based queue (see for example |
168 |
> |
* ArrayDeque). Updates to the indices must still be ordered in a |
169 |
> |
* way that guarantees that top == base means the queue is empty, |
170 |
> |
* but otherwise may err on the side of possibly making the queue |
171 |
> |
* appear nonempty when a push, pop, or poll have not fully |
172 |
> |
* committed. Note that this means that the poll operation, |
173 |
> |
* considered individually, is not wait-free. One thief cannot |
174 |
> |
* successfully continue until another in-progress one (or, if |
175 |
> |
* previously empty, a push) completes. However, in the |
176 |
> |
* aggregate, we ensure at least probabilistic non-blockingness. |
177 |
> |
* If an attempted steal fails, a thief always chooses a different |
178 |
> |
* random victim target to try next. So, in order for one thief to |
179 |
> |
* progress, it suffices for any in-progress poll or new push on |
180 |
> |
* any empty queue to complete. |
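
The slot-CAS discipline above can be made concrete with a small standalone sketch. This is not the pool's code (which uses Unsafe intrinsics, growable arrays, and weaker orderings; base and top are volatile here only to keep the sketch obviously safe). It shows how pop and poll both arbitrate by CASing a slot from non-null to null, so the indices need no tag bits:

    import java.util.concurrent.atomic.AtomicReferenceArray;

    final class SlotCasDequeSketch<T> {
        final AtomicReferenceArray<T> slots =
            new AtomicReferenceArray<T>(1 << 8);   // fixed power-of-two capacity
        volatile int base;                         // next slot to poll (steal)
        volatile int top;                          // next slot to push

        void push(T task) {                        // owner thread only
            if (top - base >= slots.length())
                throw new IllegalStateException("full; the real queue resizes");
            slots.set((slots.length() - 1) & top, task);
            top = top + 1;                         // publish after the slot write
        }

        T pop() {                                  // owner thread only (LIFO)
            int s;
            while ((s = top - 1) - base >= 0) {
                int i = (slots.length() - 1) & s;
                T t = slots.get(i);
                if (t == null)
                    break;                         // a thief already took it
                if (slots.compareAndSet(i, t, null)) {
                    top = s;
                    return t;
                }
            }
            return null;
        }

        T poll() {                                 // any thread (FIFO steal)
            for (;;) {
                int b = base;
                if (b - top >= 0)
                    return null;                   // appears empty
                int i = (slots.length() - 1) & b;
                T t = slots.get(i);
                if (t == null)
                    return null;                   // another poll is mid-flight;
                                                   // a thief picks a new victim
                if (base == b && slots.compareAndSet(i, t, null)) {
                    base = b + 1;                  // safe: we won the slot CAS
                    return t;
                }                                  // else lost a race; retry
            }
        }
    }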
181 |
> |
* |
182 |
> |
* This approach also enables support of a user mode in which local |
183 |
> |
* task processing is in FIFO, not LIFO order, simply by using |
184 |
> |
* poll rather than pop. This can be useful in message-passing |
185 |
> |
* frameworks in which tasks are never joined. However, neither
186
>
* mode considers affinities, loads, cache localities, etc., so
187
>
* these modes rarely provide the best possible performance on a given
188
>
* machine, but they portably provide good throughput by averaging over
189 |
> |
* these factors. (Further, even if we did try to use such |
190 |
> |
* information, we do not usually have a basis for exploiting it. |
191 |
> |
* For example, some sets of tasks profit from cache affinities, |
192 |
> |
* but others are harmed by cache pollution effects.) |
193 |
> |
* |
194 |
> |
* WorkQueues are also used in a similar way for tasks submitted |
195 |
> |
* to the pool. We cannot mix these tasks in the same queues used |
196 |
> |
* for work-stealing (this would contaminate lifo/fifo |
197 |
> |
* processing). Instead, we loosely associate submission queues |
198 |
> |
* with submitting threads, using a form of hashing. The |
199 |
> |
* ThreadLocal Submitter class contains a value initially used as |
200 |
> |
* a hash code for choosing existing queues, but may be randomly |
201 |
> |
* repositioned upon contention with other submitters. In |
202 |
> |
* essence, submitters act like workers except that they never |
203 |
> |
* take tasks, and they are multiplexed on to a finite number of |
204 |
> |
* shared work queues. However, classes are set up so that future |
205 |
> |
* extensions could allow submitters to optionally help perform |
206 |
> |
* tasks as well. Pool submissions from internal workers are also |
207 |
> |
* allowed, but use randomized rather than thread-hashed queue |
208 |
> |
* indices to avoid imbalance. Insertion of tasks in shared mode |
209 |
> |
* requires a lock (mainly to protect in the case of resizing) but |
210 |
> |
* we use only a simple spinlock (using bits in field runState), |
211 |
> |
* because submitters encountering a busy queue try or create |
212 |
> |
* other queues, and so never block.
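
A hypothetical sketch of this submitter hashing (the names here are illustrative, not the file's Submitter class): a per-thread probe value picks an even-indexed shared queue, as described above, and contention triggers an xorshift rehash so the submitter moves to a different queue instead of blocking:

    final class SubmitterProbeSketch {
        static final ThreadLocal<int[]> probe = new ThreadLocal<int[]>() {
            protected int[] initialValue() {       // nonzero per-thread hash
                return new int[] { new java.util.Random().nextInt() | 1 };
            }
        };

        /** Maps the calling thread to an even slot of a power-of-two table. */
        static int indexFor(int tableLength) {
            return (probe.get()[0] & (tableLength - 1)) & ~1;
        }

        /** On finding the chosen queue busy: reposition rather than block. */
        static void moveOnContention() {
            int[] p = probe.get();
            int h = p[0];                          // xorshift rehash
            h ^= h << 13; h ^= h >>> 17; h ^= h << 5;
            p[0] = h;
        }
    }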
213 |
> |
* |
214 |
> |
* Management |
215 |
> |
* ========== |
216 |
|
* |
217 |
|
* The main throughput advantages of work-stealing stem from |
218 |
|
* decentralized control -- workers mostly take tasks from |
219 |
|
* themselves or each other. We cannot negate this in the |
220 |
|
* implementation of other management responsibilities. The main |
221 |
|
* tactic for avoiding bottlenecks is packing nearly all |
222 |
< |
* essentially atomic control state into a single 64bit volatile |
223 |
< |
* variable ("ctl"). This variable is read on the order of 10-100 |
224 |
< |
* times as often as it is modified (always via CAS). (There is |
225 |
< |
* some additional control state, for example variable "shutdown" |
226 |
< |
* for which we can cope with uncoordinated updates.) This |
227 |
< |
* streamlines synchronization and control at the expense of messy |
228 |
< |
* constructions needed to repack status bits upon updates. |
229 |
< |
* Updates tend not to contend with each other except during |
230 |
< |
* bursts while submitted tasks begin or end. In some cases when |
231 |
< |
* they do contend, threads can instead do something else |
232 |
< |
* (usually, scan for tasks) until contention subsides.
233 |
< |
* |
234 |
< |
* To enable packing, we restrict maximum parallelism to (1<<15)-1 |
235 |
< |
* (which is far in excess of normal operating range) to allow |
236 |
< |
* ids, counts, and their negations (used for thresholding) to fit |
237 |
< |
* into 16bit fields. |
238 |
< |
* |
239 |
< |
* Recording Workers. Workers are recorded in the "workers" array |
240 |
< |
* that is created upon pool construction and expanded if (rarely) |
241 |
< |
* necessary. This is an array as opposed to some other data |
242 |
< |
* structure to support index-based random steals by workers. |
243 |
< |
* Updates to the array recording new workers and unrecording |
244 |
< |
* terminated ones are protected from each other by a seqLock |
245 |
< |
* (scanGuard) but the array is otherwise concurrently readable, |
168 |
< |
* and accessed directly by workers. To simplify index-based |
222 |
> |
* essentially atomic control state into two volatile variables |
223 |
> |
* that are by far most often read (not written) as status and |
224 |
> |
* consistency checks. |
225 |
> |
* |
226 |
> |
* Field "ctl" contains 64 bits holding all the information needed |
227 |
> |
* to atomically decide to add, inactivate, enqueue (on an event |
228 |
> |
* queue), dequeue, and/or re-activate workers. To enable this |
229 |
> |
* packing, we restrict maximum parallelism to (1<<15)-1 (which is |
230 |
> |
* far in excess of normal operating range) to allow ids, counts, |
231 |
> |
* and their negations (used for thresholding) to fit into 16bit |
232 |
> |
* fields. |
233 |
> |
* |
234 |
> |
* Field "runState" contains 32 bits needed to register and |
235 |
> |
* deregister WorkQueues, as well as to enable shutdown. It is |
236 |
> |
* only modified under a lock (normally briefly held, but |
237 |
> |
* occasionally protecting allocations and resizings) but even |
238 |
> |
* when locked remains available to check consistency. |
239 |
> |
* |
240 |
> |
* Recording WorkQueues. WorkQueues are recorded in the |
241 |
> |
* "workQueues" array that is created upon pool construction and |
242 |
> |
* expanded if necessary. Updates to the array while recording |
243 |
> |
* new workers and unrecording terminated ones are protected from |
244 |
> |
* each other by a lock but the array is otherwise concurrently |
245 |
> |
* readable, and accessed directly. To simplify index-based |
246 |
|
* operations, the array size is always a power of two, and all |
247 |
< |
* readers must tolerate null slots. To avoid flailing during |
248 |
< |
* start-up, the array is presized to hold twice #parallelism |
249 |
< |
* workers (which is unlikely to need further resizing during |
250 |
< |
* execution). But to avoid dealing with so many null slots, |
251 |
< |
* variable scanGuard includes a mask for the nearest power of two |
252 |
< |
* that contains all current workers. All worker thread creation |
253 |
< |
* is on-demand, triggered by task submissions, replacement of |
254 |
< |
* terminated workers, and/or compensation for blocked |
255 |
< |
* workers. However, all other support code is set up to work with |
256 |
< |
* other policies. To ensure that we do not hold on to worker |
257 |
< |
* references that would prevent GC, ALL accesses to workers are |
258 |
< |
* via indices into the workers array (which is one source of some |
259 |
< |
* of the messy code constructions here). In essence, the workers |
260 |
< |
* array serves as a weak reference mechanism. Thus for example |
261 |
< |
* the wait queue field of ctl stores worker indices, not worker |
262 |
< |
* references. Access to the workers in associated methods (for |
263 |
< |
* example signalWork) must both index-check and null-check the |
264 |
< |
* IDs. All such accesses ignore bad IDs by returning out early |
265 |
< |
* from what they are doing, since this can only be associated |
266 |
< |
* with termination, in which case it is OK to give up. |
267 |
< |
* |
268 |
< |
* All uses of the workers array, as well as queue arrays, check |
269 |
< |
* that the array is non-null (even if previously non-null). This |
270 |
< |
* allows nulling during termination, which is currently not |
271 |
< |
* necessary, but remains an option for resource-revocation-based |
272 |
< |
* shutdown schemes. |
273 |
< |
* |
274 |
< |
* Wait Queuing. Unlike HPC work-stealing frameworks, we cannot |
275 |
< |
* let workers spin indefinitely scanning for tasks when none
276 |
< |
* can be immediately found, and we cannot start/resume workers |
277 |
< |
* unless there appear to be tasks available. On the other hand, |
278 |
< |
* we must quickly prod them into action when new tasks are |
279 |
< |
* submitted or generated. We park/unpark workers after placing |
280 |
< |
* in an event wait queue when they cannot find work. This "queue" |
281 |
< |
* is actually a simple Treiber stack, headed by the "id" field of |
282 |
< |
* ctl, plus a 15bit counter value to both wake up waiters (by |
283 |
< |
* advancing their count) and avoid ABA effects. Successors are |
284 |
< |
* held in worker field "nextWait". Queuing deals with several |
285 |
< |
* intrinsic races, mainly that a task-producing thread can miss |
286 |
< |
* seeing (and signalling) another thread that gave up looking for |
287 |
< |
* work but has not yet entered the wait queue. We solve this by |
288 |
< |
* requiring a full sweep of all workers both before (in scan()) |
289 |
< |
* and after (in awaitWork()) a newly waiting worker is added to |
290 |
< |
* the wait queue. During a rescan, the worker might release some |
291 |
< |
* other queued worker rather than itself, which has the same net |
292 |
< |
* effect. |
247 |
> |
* readers must tolerate null slots. Shared (submission) queues |
248 |
> |
* are at even indices, worker queues at odd indices. Grouping |
249 |
> |
* them together in this way simplifies and speeds up task |
250 |
> |
* scanning. To avoid flailing during start-up, the array is |
251 |
> |
* presized to hold twice #parallelism workers (which is unlikely |
252 |
> |
* to need further resizing during execution). But to avoid |
253 |
> |
* dealing with so many null slots, variable runState includes a |
254 |
> |
* mask for the nearest power of two that contains all current |
255 |
> |
* workers. All worker thread creation is on-demand, triggered by |
256 |
> |
* task submissions, replacement of terminated workers, and/or |
257 |
> |
* compensation for blocked workers. However, all other support |
258 |
> |
* code is set up to work with other policies. To ensure that we |
259 |
> |
* do not hold on to worker references that would prevent GC, ALL |
260 |
> |
* accesses to workQueues are via indices into the workQueues |
261 |
> |
* array (which is one source of some of the messy code |
262 |
> |
* constructions here). In essence, the workQueues array serves as |
263 |
> |
* a weak reference mechanism. Thus for example the wait queue |
264 |
> |
* field of ctl stores indices, not references. Access to the |
265 |
> |
* workQueues in associated methods (for example signalWork) must |
266 |
> |
* both index-check and null-check the IDs. All such accesses |
267 |
> |
* ignore bad IDs by returning out early from what they are doing, |
268 |
> |
* since this can only be associated with termination, in which |
269 |
> |
* case it is OK to give up. |
270 |
> |
* |
271 |
> |
* All uses of the workQueues array check that it is non-null |
272 |
> |
* (even if previously non-null). This allows nulling during |
273 |
> |
* termination, which is currently not necessary, but remains an |
274 |
> |
* option for resource-revocation-based shutdown schemes. It also |
275 |
> |
* helps reduce JIT issuance of uncommon-trap code, which tends to |
276 |
> |
* unnecessarily complicate control flow in some methods. |
277 |
> |
* |
278 |
> |
* Event Queuing. Unlike HPC work-stealing frameworks, we cannot |
279 |
> |
* let workers spin indefinitely scanning for tasks when none can |
280 |
> |
* be found immediately, and we cannot start/resume workers unless |
281 |
> |
* there appear to be tasks available. On the other hand, we must |
282 |
> |
* quickly prod them into action when new tasks are submitted or |
283 |
> |
* generated. In many usages, ramp-up time to activate workers is |
284 |
> |
* the main limiting factor in overall performance (this is |
285 |
> |
* compounded at program start-up by JIT compilation and |
286 |
> |
* allocation). So we try to streamline this as much as possible. |
287 |
> |
* We park/unpark workers after placing in an event wait queue |
288 |
> |
* when they cannot find work. This "queue" is actually a simple |
289 |
> |
* Treiber stack, headed by the "id" field of ctl, plus a 15bit |
290 |
> |
* counter value (that reflects the number of times a worker has |
291 |
> |
* been inactivated) to avoid ABA effects (we need only as many |
292 |
> |
* version numbers as worker threads). Successors are held in |
293 |
> |
* field WorkQueue.nextWait. Queuing deals with several intrinsic |
294 |
> |
* races, mainly that a task-producing thread can miss seeing (and |
295 |
> |
* signalling) another thread that gave up looking for work but |
296 |
> |
* has not yet entered the wait queue. We solve this by requiring |
297 |
> |
* a full sweep of all workers (via repeated calls to method |
298 |
> |
* scan()) both before and after a newly waiting worker is added |
299 |
> |
* to the wait queue. During a rescan, the worker might release |
300 |
> |
* some other queued worker rather than itself, which has the same |
301 |
> |
* net effect. Because enqueued workers may actually be rescanning |
302 |
> |
* rather than waiting, we set and clear the "parker" field of |
303 |
> |
* WorkQueues to reduce unnecessary calls to unpark. (This |
304 |
> |
* requires a secondary recheck to avoid missed signals.) Note |
305 |
> |
* the unusual conventions about Thread.interrupts surrounding |
306 |
> |
* parking and other blocking: Because interrupts are used solely |
307 |
> |
* to alert threads to check termination, which is checked anyway |
308 |
> |
* upon blocking, we clear status (using Thread.interrupted) |
309 |
> |
* before any call to park, so that park does not immediately |
310 |
> |
* return due to status being set via some other unrelated call to |
311 |
> |
* interrupt in user code. |
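
The Treiber-stack idea can be sketched on its own (an assumed encoding: the pool actually packs the version count and top id into ctl alongside the worker counts, and waiters park after pushing). The head is a single atomic word, pushes link the old top into the waiter's nextWait slot, and bumping the version on every update rules out ABA when ids are reused:

    import java.util.concurrent.atomic.AtomicLong;

    final class WaiterStackSketch {
        final AtomicLong head = new AtomicLong();  // (version << 32) | topWaiterId
        final int[] nextWait;                      // successor links, by waiter id

        WaiterStackSketch(int maxWaiters) {
            nextWait = new int[maxWaiters + 1];    // id 0 means "stack empty" here
        }

        void push(int id) {                        // waiter enqueues, then parks
            for (;;) {
                long h = head.get();
                nextWait[id] = (int) h;            // remember the old top's id
                long nh = (((h >>> 32) + 1) << 32) | id;  // bump version: no ABA
                if (head.compareAndSet(h, nh))
                    return;
            }
        }

        int pop() {                                // returns id to unpark, or 0
            for (;;) {
                long h = head.get();
                int id = (int) h;
                if (id == 0)
                    return 0;
                long nh = (((h >>> 32) + 1) << 32) | (nextWait[id] & 0xffffffffL);
                if (head.compareAndSet(h, nh))
                    return id;
            }
        }
    }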
312 |
|
* |
313 |
|
* Signalling. We create or wake up workers only when there |
314 |
|
* appears to be at least one task they might be able to find and |
315 |
|
* execute. When a submission is added or another worker adds a |
316 |
< |
* task to a queue that previously had two or fewer tasks, they |
316 |
> |
* task to a queue that previously had fewer than two tasks, they |
317 |
|
* signal waiting workers (or trigger creation of new ones if |
318 |
|
* fewer than the given parallelism level -- see signalWork). |
319 |
< |
* These primary signals are buttressed by signals during rescans |
320 |
< |
* as well as those performed when a worker steals a task and |
321 |
< |
* notices that there are more tasks too; together these cover the |
322 |
< |
* signals needed in cases when more than two tasks are pushed |
227 |
< |
* but untaken. |
319 |
> |
* These primary signals are buttressed by signals during rescans; |
320 |
> |
* together these cover the signals needed in cases when more |
321 |
> |
* tasks are pushed but untaken, and improve performance compared |
322 |
> |
* to having one thread wake up all workers. |
323 |
|
* |
324 |
|
* Trimming workers. To release resources after periods of lack of |
325 |
|
* use, a worker starting to wait when the pool is quiescent will |
326 |
|
* time out and terminate if the pool has remained quiescent for |
327 |
< |
* SHRINK_RATE nanosecs. |
327 |
> |
* SHRINK_RATE nanosecs. This will slowly propagate, eventually |
328 |
> |
* terminating all workers after long periods of non-use. |
329 |
|
* |
330 |
< |
* Submissions. External submissions are maintained in an |
331 |
< |
* array-based queue that is structured identically to |
332 |
< |
* ForkJoinWorkerThread queues (which see) except for the use of |
333 |
< |
* submissionLock in method addSubmission. Unlike worker queues, |
334 |
< |
* multiple external threads can add new submissions. |
335 |
< |
* |
336 |
< |
* Compensation. Beyond work-stealing support and lifecycle |
337 |
< |
* control, the main responsibility of this framework is to take |
338 |
< |
* actions when one worker is waiting to join a task stolen (or |
339 |
< |
* always held by) another. Because we are multiplexing many |
340 |
< |
* tasks on to a pool of workers, we can't just let them block (as |
341 |
< |
* in Thread.join). We also cannot just reassign the joiner's |
342 |
< |
* run-time stack with another and replace it later, which would |
343 |
< |
* be a form of "continuation", that even if possible is not |
344 |
< |
* necessarily a good idea since we sometimes need both an |
345 |
< |
* unblocked task and its continuation to progress. Instead we |
346 |
< |
* combine two tactics: |
330 |
> |
* Shutdown and Termination. A call to shutdownNow atomically sets |
331 |
> |
* a runState bit and then (non-atomically) sets each worker's |
332 |
> |
* runState status, cancels all unprocessed tasks, and wakes up |
333 |
> |
* all waiting workers. Detecting whether termination should |
334 |
> |
* commence after a non-abrupt shutdown() call requires more work |
335 |
> |
* and bookkeeping. We need consensus about quiescence (i.e., that |
336 |
> |
* there is no more work). The active count provides a primary |
337 |
> |
* indication but non-abrupt shutdown still requires a rechecking |
338 |
> |
* scan for any workers that are inactive but not queued. |
339 |
> |
* |
340 |
> |
* Joining Tasks |
341 |
> |
* ============= |
342 |
> |
* |
343 |
> |
* Any of several actions may be taken when one worker is waiting |
344 |
> |
* to join a task stolen (or always held) by another. Because we |
345 |
> |
* are multiplexing many tasks on to a pool of workers, we can't |
346 |
> |
* just let them block (as in Thread.join). We also cannot just |
347 |
> |
* reassign the joiner's run-time stack with another and replace |
348 |
> |
* it later, which would be a form of "continuation", that even if |
349 |
> |
* possible is not necessarily a good idea since we sometimes need |
350 |
> |
* both an unblocked task and its continuation to progress. |
351 |
> |
* Instead we combine two tactics: |
352 |
|
* |
353 |
|
* Helping: Arranging for the joiner to execute some task that it |
354 |
< |
* would be running if the steal had not occurred. Method |
254 |
< |
* ForkJoinWorkerThread.joinTask tracks joining->stealing |
255 |
< |
* links to try to find such a task. |
354 |
> |
* would be running if the steal had not occurred. |
355 |
|
* |
356 |
|
* Compensating: Unless there are already enough live threads, |
357 |
< |
* method tryPreBlock() may create or re-activate a spare |
358 |
< |
* thread to compensate for blocked joiners until they |
359 |
< |
* unblock. |
357 |
> |
* method tryCompensate() may create or re-activate a spare |
358 |
> |
* thread to compensate for blocked joiners until they unblock. |
359 |
> |
* |
360 |
> |
* A third form (implemented in tryRemoveAndExec and |
361 |
> |
* tryPollForAndExec) amounts to helping a hypothetical |
362 |
> |
* compensator: If we can readily tell that a possible action of a |
363 |
> |
* compensator is to steal and execute the task being joined, the |
364 |
> |
* joining thread can do so directly, without the need for a |
365 |
> |
* compensation thread (although at the expense of larger run-time |
366 |
> |
* stacks, but the tradeoff is typically worthwhile). |
367 |
|
* |
368 |
|
* The ManagedBlocker extension API can't use helping so relies |
369 |
|
* only on compensation in method awaitBlocker. |
370 |
|
* |
371 |
+ |
* The algorithm in tryHelpStealer entails a form of "linear" |
372 |
+ |
* helping: Each worker records (in field currentSteal) the most |
373 |
+ |
* recent task it stole from some other worker. Plus, it records |
374 |
+ |
* (in field currentJoin) the task it is currently actively |
375 |
+ |
* joining. Method tryHelpStealer uses these markers to try to |
376 |
+ |
* find a worker to help (i.e., steal back a task from and execute |
377 |
+ |
* it) that could hasten completion of the actively joined task. |
378 |
+ |
* In essence, the joiner executes a task that would be on its own |
379 |
+ |
* local deque had the to-be-joined task not been stolen. This may |
380 |
+ |
* be seen as a conservative variant of the approach in Wagner & |
381 |
+ |
* Calder "Leapfrogging: a portable technique for implementing |
382 |
+ |
* efficient futures" SIGPLAN Notices, 1993 |
383 |
+ |
* (http://portal.acm.org/citation.cfm?id=155354). It differs in |
384 |
+ |
* that: (1) We only maintain dependency links across workers upon |
385 |
+ |
* steals, rather than use per-task bookkeeping. This sometimes |
386 |
+ |
* requires a linear scan of the workers array to locate stealers, but
387 |
+ |
* often doesn't because stealers leave hints (that may become |
388 |
+ |
* stale/wrong) of where to locate them. A stealHint is only a |
389 |
+ |
* hint because a worker might have had multiple steals and the |
390 |
+ |
* hint records only one of them (usually the most current). |
391 |
+ |
* Hinting isolates cost to when it is needed, rather than adding |
392 |
+ |
* to per-task overhead. (2) It is "shallow", ignoring nesting |
393 |
+ |
* and potentially cyclic mutual steals. (3) It is intentionally |
394 |
+ |
* racy: field currentJoin is updated only while actively joining, |
395 |
+ |
* which means that we miss links in the chain during long-lived |
396 |
+ |
* tasks, GC stalls etc (which is OK since blocking in such cases |
397 |
+ |
* is usually a good idea). (4) We bound the number of attempts |
398 |
+ |
* to find work (see MAX_HELP_DEPTH) and fall back to suspending |
399 |
+ |
* the worker and if necessary replacing it with another. |
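
A single-threaded toy model of this bounded linear helping (all names mirror the fields described above; the real tryHelpStealer also consults stealHints, revalidates queues, and runs concurrently):

    import java.util.ArrayDeque;

    final class HelpingWalkSketch {
        static final int MAX_HELP_DEPTH = 16;      // bound cycles and staleness

        static final class Task {
            volatile boolean done;
            void exec() { done = true; }           // stand-in for real work
        }

        static final class Worker {
            final ArrayDeque<Task> queue = new ArrayDeque<Task>();
            volatile Task currentJoin;             // task this worker awaits
            volatile Task currentSteal;            // last task taken from another
        }

        /** Linear scan for the stealer of t (the real code also uses hints). */
        static Worker stealerOf(Worker[] ws, Task t) {
            for (Worker w : ws)
                if (w != null && w.currentSteal == t)
                    return w;
            return null;
        }

        /** Instead of blocking, the joiner runs work along the steal chain. */
        static void helpStealer(Worker joiner, Worker[] ws) {
            Task j = joiner.currentJoin;
            for (int d = 0; d < MAX_HELP_DEPTH && j != null && !j.done; ++d) {
                Worker v = stealerOf(ws, j);       // who took the joined task?
                if (v == null)
                    break;                         // chain lost: suspend/compensate
                Task t = v.queue.pollLast();       // work the stealer spawned
                if (t != null)
                    t.exec();                      // run it on the joiner's stack
                j = v.currentJoin;                 // descend one steal link
            }
        }
    }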
400 |
+ |
* |
401 |
|
* It is impossible to keep exactly the target parallelism number |
402 |
|
* of threads running at any given time. Determining the |
403 |
|
* existence of conservatively safe helping targets, the |
404 |
|
* availability of already-created spares, and the apparent need |
405 |
< |
* to create new spares are all racy and require heuristic |
406 |
< |
* guidance, so we rely on multiple retries of each. Currently, |
407 |
< |
* in keeping with on-demand signalling policy, we compensate only |
408 |
< |
* if blocking would leave less than one active (non-waiting, |
409 |
< |
* non-blocked) worker. Additionally, to avoid some false alarms |
410 |
< |
* due to GC, lagging counters, system activity, etc, compensated |
411 |
< |
* blocking for joins is only attempted after a number of rechecks |
412 |
< |
* proportional to the current apparent deficit (where retries are |
413 |
< |
* interspersed with Thread.yield, for good citizenship). The |
278 |
< |
* variable blockedCount, incremented before blocking and |
279 |
< |
* decremented after, is sometimes needed to distinguish cases of |
280 |
< |
* waiting for work vs blocking on joins or other managed sync, |
281 |
< |
* but both cases are equivalent for most pool control, so we
282 |
< |
* can update non-atomically. (Additionally, contention on |
283 |
< |
* blockedCount alleviates some contention on ctl). |
284 |
< |
* |
285 |
< |
* Shutdown and Termination. A call to shutdownNow atomically sets |
286 |
< |
* the ctl stop bit and then (non-atomically) sets each worker's
287 |
< |
* "terminate" status, cancels all unprocessed tasks, and wakes up |
288 |
< |
* all waiting workers. Detecting whether termination should |
289 |
< |
* commence after a non-abrupt shutdown() call requires more work |
290 |
< |
* and bookkeeping. We need consensus about quiescence (i.e., that
291 |
< |
* there is no more work) which is reflected in active counts so |
292 |
< |
* long as there are no current blockers, as well as possible |
293 |
< |
* re-evaluations during independent changes in blocking or |
294 |
< |
* quiescing workers. |
405 |
> |
* to create new spares are all racy, so we rely on multiple |
406 |
> |
* retries of each. Currently, in keeping with on-demand |
407 |
> |
* signalling policy, we compensate only if blocking would leave |
408 |
> |
* less than one active (non-waiting, non-blocked) worker. |
409 |
> |
* Additionally, to avoid some false alarms due to GC, lagging |
410 |
> |
* counters, system activity, etc, compensated blocking for joins |
411 |
> |
* is only attempted after rechecks stabilize in |
412 |
> |
* ForkJoinTask.awaitJoin. (Retries are interspersed with |
413 |
> |
* Thread.yield, for good citizenship.) |
414 |
|
* |
415 |
|
* Style notes: There is a lot of representation-level coupling |
416 |
|
* among classes ForkJoinPool, ForkJoinWorkerThread, and |
417 |
< |
* ForkJoinTask. Most fields of ForkJoinWorkerThread maintain |
418 |
< |
* data structures managed by ForkJoinPool, so are directly |
419 |
< |
* accessed. Conversely we allow access to "workers" array by |
420 |
< |
* workers, and direct access to ForkJoinTask.status by both |
421 |
< |
* ForkJoinPool and ForkJoinWorkerThread. There is little point |
422 |
< |
* trying to reduce this, since any associated future changes in |
423 |
< |
* representations will need to be accompanied by algorithmic |
424 |
< |
* changes anyway. All together, these low-level implementation |
425 |
< |
* choices produce as much as a factor of 4 performance |
307 |
< |
* improvement compared to naive implementations, and enable the |
308 |
< |
* processing of billions of tasks per second, at the expense of |
309 |
< |
* some ugliness. |
417 |
> |
* ForkJoinTask. The fields of WorkQueue maintain data structures |
418 |
> |
* managed by ForkJoinPool, so are directly accessed. There is |
419 |
> |
* little point trying to reduce this, since any associated future |
420 |
> |
* changes in representations will need to be accompanied by |
421 |
> |
* algorithmic changes anyway. All together, these low-level |
422 |
> |
* implementation choices produce as much as a factor of 4 |
423 |
> |
* performance improvement compared to naive implementations, and |
424 |
> |
* enable the processing of billions of tasks per second, at the |
425 |
> |
* expense of some ugliness. |
426 |
|
* |
427 |
< |
* Methods signalWork() and scan() are the main bottlenecks so are |
427 |
> |
* Methods signalWork() and scan() are the main bottlenecks, so are |
428 |
|
* especially heavily micro-optimized/mangled. There are lots of |
429 |
|
* inline assignments (of form "while ((local = field) != 0)") |
430 |
|
* which are usually the simplest way to ensure the required read |
436 |
|
* coding oddities that help some methods perform reasonably even |
437 |
|
* when interpreted (not compiled). |
438 |
|
* |
439 |
< |
* The order of declarations in this file is: (1) declarations of |
440 |
< |
* statics (2) fields (along with constants used when unpacking |
441 |
< |
* some of them), listed in an order that tends to reduce |
442 |
< |
* contention among them a bit under most JVMs. (3) internal |
443 |
< |
* control methods (4) callbacks and other support for |
444 |
< |
* ForkJoinTask and ForkJoinWorkerThread classes, (5) exported |
445 |
< |
* methods (plus a few little helpers). (6) static block |
446 |
< |
* initializing all statics in a minimally dependent order. |
439 |
> |
* The order of declarations in this file is: |
440 |
> |
* (1) statics |
441 |
> |
* (2) fields (along with constants used when unpacking some of |
442 |
> |
* them), listed in an order that tends to reduce contention |
443 |
> |
* among them a bit under most JVMs; |
444 |
> |
* (3) nested classes |
445 |
> |
* (4) internal control methods |
446 |
> |
* (5) callbacks and other support for ForkJoinTask methods |
447 |
> |
* (6) exported methods (plus a few little helpers) |
448 |
> |
* (7) static block initializing all statics in a minimally |
449 |
> |
* dependent order. |
450 |
|
*/ |
451 |
|
|
452 |
|
/** |
505 |
|
private static final AtomicInteger poolNumberGenerator; |
506 |
|
|
507 |
|
/** |
508 |
< |
* Generator for initial random seeds for worker victim |
509 |
< |
* selection. This is used only to create initial seeds. Random |
510 |
< |
* steals use a cheaper xorshift generator per steal attempt. We |
392 |
< |
* don't expect much contention on seedGenerator, so just use a |
393 |
< |
* plain Random. |
394 |
< |
*/ |
395 |
< |
static final Random workerSeedGenerator; |
396 |
< |
|
397 |
< |
/** |
398 |
< |
* Array holding all worker threads in the pool. Initialized upon |
399 |
< |
* construction. Array size must be a power of two. Updates and |
400 |
< |
* replacements are protected by scanGuard, but the array is |
401 |
< |
* always kept in a consistent enough state to be randomly |
402 |
< |
* accessed without locking by workers performing work-stealing, |
403 |
< |
* as well as other traversal-based methods in this class, so long |
404 |
< |
* as reads memory-acquire by first reading ctl. All readers must |
405 |
< |
* tolerate that some array slots may be null. |
406 |
< |
*/ |
407 |
< |
ForkJoinWorkerThread[] workers; |
408 |
< |
|
409 |
< |
/** |
410 |
< |
* Initial size for submission queue array. Must be a power of |
411 |
< |
* two. In many applications, these always stay small so we use a |
412 |
< |
* small initial cap. |
413 |
< |
*/ |
414 |
< |
private static final int INITIAL_QUEUE_CAPACITY = 8; |
415 |
< |
|
416 |
< |
/** |
417 |
< |
* Maximum size for submission queue array. Must be a power of two |
418 |
< |
* less than or equal to 1 << (31 - width of array entry) to |
419 |
< |
* ensure lack of index wraparound, but is capped at a lower |
420 |
< |
* value to help users trap runaway computations. |
421 |
< |
*/ |
422 |
< |
private static final int MAXIMUM_QUEUE_CAPACITY = 1 << 24; // 16M |
423 |
< |
|
424 |
< |
/** |
425 |
< |
* Array serving as submission queue. Initialized upon construction. |
426 |
< |
*/ |
427 |
< |
private ForkJoinTask<?>[] submissionQueue; |
428 |
< |
|
429 |
< |
/** |
430 |
< |
* Lock protecting submissions array for addSubmission |
431 |
< |
*/ |
432 |
< |
private final ReentrantLock submissionLock; |
433 |
< |
|
434 |
< |
/** |
435 |
< |
* Condition for awaitTermination, using submissionLock for |
436 |
< |
* convenience. |
437 |
< |
*/ |
438 |
< |
private final Condition termination; |
439 |
< |
|
440 |
< |
/** |
441 |
< |
* Creation factory for worker threads. |
442 |
< |
*/ |
443 |
< |
private final ForkJoinWorkerThreadFactory factory; |
444 |
< |
|
445 |
< |
/** |
446 |
< |
* The uncaught exception handler used when any worker abruptly |
447 |
< |
* terminates. |
448 |
< |
*/ |
449 |
< |
final Thread.UncaughtExceptionHandler ueh; |
450 |
< |
|
451 |
< |
/** |
452 |
< |
* Prefix for assigning names to worker threads |
453 |
< |
*/ |
454 |
< |
private final String workerNamePrefix; |
455 |
< |
|
456 |
< |
/** |
457 |
< |
* Sum of per-thread steal counts, updated only when threads are |
458 |
< |
* idle or terminating. |
459 |
< |
*/ |
460 |
< |
private volatile long stealCount; |
461 |
< |
|
462 |
< |
/** |
463 |
< |
* Main pool control -- a long packed with: |
508 |
> |
* Bits and masks for control variables |
509 |
> |
* |
510 |
> |
* Field ctl is a long packed with: |
511 |
|
* AC: Number of active running workers minus target parallelism (16 bits) |
512 |
< |
* TC: Number of total workers minus target parallelism (16bits) |
512 |
> |
* TC: Number of total workers minus target parallelism (16 bits) |
513 |
|
* ST: true if pool is terminating (1 bit) |
514 |
|
* EC: the wait count of top waiting thread (15 bits) |
515 |
< |
* ID: ~poolIndex of top of Treiber stack of waiting threads (16 bits) |
515 |
> |
* ID: ~(poolIndex >>> 1) of top of Treiber stack of waiters (16 bits) |
516 |
|
* |
517 |
|
* When convenient, we can extract the upper 32 bits of counts and |
518 |
|
* the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e = |
525 |
|
* negative, there is at least one waiting worker, and when e is |
526 |
|
* negative, the pool is terminating. To deal with these possibly |
527 |
|
* negative fields, we use casts in and out of "short" and/or |
528 |
< |
* signed shifts to maintain signedness. Note: AC_SHIFT is |
529 |
< |
* redundantly declared in ForkJoinWorkerThread in order to |
530 |
< |
* integrate a surplus-threads check. |
528 |
> |
* signed shifts to maintain signedness. |
529 |
> |
* |
530 |
> |
* When a thread is queued (inactivated), its eventCount field is |
531 |
> |
* negative, which is the only way to tell if a worker is |
532 |
> |
* prevented from executing tasks, even though it must continue to |
533 |
> |
* scan for them to avoid queuing races. |
534 |
> |
* |
535 |
> |
* Field runState is an int packed with: |
536 |
> |
* SHUTDOWN: true if shutdown is enabled (1 bit) |
537 |
> |
* SEQ: a sequence number updated upon (de)registering workers (15 bits) |
538 |
> |
* MASK: mask (power of 2 - 1) covering all registered poolIndexes (16 bits) |
539 |
> |
* |
540 |
> |
* The combination of mask and sequence number enables simple |
541 |
> |
* consistency checks: Staleness of read-only operations on the |
542 |
> |
* workers and queues arrays can be checked by comparing runState |
543 |
> |
* before vs after the reads. The low 16 bits (i.e., anding with
544 |
> |
* SMASK) hold the smallest power of two covering all worker |
545 |
> |
* indices, minus one. The mask for queues (vs workers) is twice |
546 |
> |
* this value plus 1. |
547 |
|
*/ |
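
The runState arithmetic just described, as a standalone sketch (SMASK is assumed to name the low-16-bits mask mentioned in the comment above; only the bit manipulation is the point):

    final class RunStateBitsSketch {
        static final int SHUTDOWN = 1 << 31;       // the shutdown-enabled bit
        static final int SMASK    = 0xffff;        // low 16 bits (assumed name)

        static boolean shutdownEnabled(int runState) {
            return (runState & SHUTDOWN) != 0;
        }

        /** Power-of-two-minus-one mask covering all worker indices. */
        static int workerMask(int runState) {
            return runState & SMASK;
        }

        /** Mask for queue indices: twice the worker mask, plus one. */
        static int queueMask(int runState) {
            return (workerMask(runState) << 1) | 1;
        }
    }

Read-only traversals snapshot runState before reading the arrays and compare it afterward; any change in the sequence bits forces a rescan.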
485 |
-
volatile long ctl; |
548 |
|
|
549 |
|
// bit positions/shifts for fields |
550 |
|
private static final int AC_SHIFT = 48; |
576 |
|
private static final int UTC_UNIT = 1 << UTC_SHIFT; |
577 |
|
|
578 |
|
// masks and units for dealing with e = (int)ctl |
579 |
< |
private static final int E_MASK = 0x7fffffff; // no STOP_BIT |
580 |
< |
private static final int EC_UNIT = 1 << EC_SHIFT; |
579 |
> |
private static final int E_MASK = 0x7fffffff; // no STOP_BIT |
580 |
> |
private static final int E_SEQ = 1 << EC_SHIFT; |
581 |
> |
|
582 |
> |
// runState bits |
583 |
> |
private static final int SHUTDOWN = 1 << 31; |
584 |
> |
private static final int RS_SEQ = 1 << 16; |
585 |
> |
private static final int RS_SEQ_MASK = 0x7fff0000; |
586 |
> |
|
587 |
> |
// access mode for WorkQueue |
588 |
> |
static final int LIFO_QUEUE = 0; |
589 |
> |
static final int FIFO_QUEUE = 1; |
590 |
> |
static final int SHARED_QUEUE = -1; |
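
A worked example of the ctl packing documented above. Only AC_SHIFT (48) appears in this excerpt; TC_SHIFT = 32 is assumed here to match the described layout of two signed 16-bit counts in the upper half:

    final class CtlMathSketch {
        static final int  AC_SHIFT = 48;
        static final long AC_MASK  = 0xffffL << AC_SHIFT;
        static final long AC_UNIT  = 1L << AC_SHIFT;

        /** "u" from the comments above: the upper 32 bits holding both counts. */
        static int counts(long ctl) {
            return (int) (ctl >>> 32);
        }

        /** AC: active workers minus target parallelism; signed, so shift signed. */
        static int activeCount(long ctl) {
            return (int) (ctl >> AC_SHIFT);        // sign-extends the top 16 bits
        }

        /** Returns ctl with AC incremented and every other field untouched. */
        static long incActiveCount(long ctl) {
            return (ctl & ~AC_MASK) | ((ctl + AC_UNIT) & AC_MASK);
        }

        public static void main(String[] args) {
            long ctl = -4L << AC_SHIFT;            // parallelism 4, no one active
            assert activeCount(ctl) == -4;
            assert activeCount(incActiveCount(ctl)) == -3; // one worker activated
        }
    }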
591 |
|
|
592 |
|
/** |
593 |
< |
* The target parallelism level. |
593 |
> |
* The wakeup interval (in nanoseconds) for a worker waiting for a |
594 |
> |
* task when the pool is quiescent to instead try to shrink the |
595 |
> |
* number of workers. The exact value does not matter too |
596 |
> |
* much. It must be short enough to release resources during |
597 |
> |
* sustained periods of idleness, but not so short that threads |
598 |
> |
* are continually re-created. |
599 |
|
*/ |
600 |
< |
final int parallelism; |
600 |
> |
private static final long SHRINK_RATE = |
601 |
> |
4L * 1000L * 1000L * 1000L; // 4 seconds |
602 |
|
|
603 |
|
/** |
604 |
< |
* Index (mod submission queue length) of next element to take |
605 |
< |
* from submission queue. |
604 |
> |
* The timeout value for attempted shrinkage; it includes
605 |
> |
* some slop to cope with system timer imprecision. |
606 |
|
*/ |
607 |
< |
volatile int queueBase; |
607 |
> |
private static final long SHRINK_TIMEOUT = SHRINK_RATE - (SHRINK_RATE / 10); |
608 |
|
|
609 |
|
/** |
610 |
< |
* Index (mod submission queue length) of next element to add |
611 |
< |
* in submission queue. |
610 |
> |
* The maximum stolen->joining link depth allowed in tryHelpStealer. |
611 |
> |
* Depths for legitimate chains are unbounded, but we use a fixed |
612 |
> |
* constant to avoid (otherwise unchecked) cycles and to bound |
613 |
> |
* staleness of traversal parameters at the expense of sometimes |
614 |
> |
* blocking when we could be helping. |
615 |
|
*/ |
616 |
< |
int queueTop; |
616 |
> |
private static final int MAX_HELP_DEPTH = 16; |
617 |
|
|
618 |
< |
/** |
619 |
< |
* True when shutdown() has been called. |
618 |
> |
/* |
619 |
> |
* Field layout order in this class tends to matter more than one |
620 |
> |
* would like. Runtime layout order is only loosely related to |
621 |
> |
* declaration order and may differ across JVMs, but the following |
622 |
> |
* empirically works OK on current JVMs. |
623 |
> |
*/ |
624 |
> |
|
625 |
> |
volatile long ctl; // main pool control |
626 |
> |
final int parallelism; // parallelism level |
627 |
> |
final int localMode; // per-worker scheduling mode |
628 |
> |
int nextPoolIndex; // hint used in registerWorker |
629 |
> |
volatile int runState; // shutdown status, seq, and mask |
630 |
> |
WorkQueue[] workQueues; // main registry |
631 |
> |
final ReentrantLock lock; // for registration |
632 |
> |
final Condition termination; // for awaitTermination |
633 |
> |
final ForkJoinWorkerThreadFactory factory; // factory for new workers |
634 |
> |
final Thread.UncaughtExceptionHandler ueh; // per-worker UEH |
635 |
> |
final AtomicLong stealCount; // collect counts when terminated |
636 |
> |
final AtomicInteger nextWorkerNumber; // to create worker name string |
637 |
> |
final String workerNamePrefix; // Prefix for assigning worker names |
638 |
> |
|
639 |
> |
/** |
640 |
> |
* Queues supporting work-stealing as well as external task |
641 |
> |
* submission. See above for main rationale and algorithms. |
642 |
> |
* Implementation relies heavily on "Unsafe" intrinsics |
643 |
> |
* and selective use of "volatile": |
644 |
> |
* |
645 |
> |
* Field "base" is the index (mod array.length) of the least valid |
646 |
> |
* queue slot, which is always the next position to steal (poll) |
647 |
> |
* from if nonempty. Reads and writes require volatile orderings |
648 |
> |
* but not CAS, because updates are only performed after slot |
649 |
> |
* CASes. |
650 |
> |
* |
651 |
> |
* Field "top" is the index (mod array.length) of the next queue |
652 |
> |
* slot to push to or pop from. It is written only by owner thread |
653 |
> |
* for push, or under lock for trySharedPush, and accessed by |
654 |
> |
* other threads only after reading (volatile) base. Both top and |
655 |
> |
* base are allowed to wrap around on overflow, but (top - base) |
656 |
> |
* (or more commonly -(base - top) to force volatile read of base |
657 |
> |
* before top) still estimates size. |
658 |
> |
* |
659 |
> |
* The array slots are read and written using the emulation of |
660 |
> |
* volatiles/atomics provided by Unsafe. Insertions must in |
661 |
> |
* general use putOrderedObject as a form of releasing store to |
662 |
> |
* ensure that all writes to the task object are ordered before |
663 |
> |
* its publication in the queue. (Although we can avoid one case |
664 |
> |
* of this when locked in trySharedPush.) All removals entail a |
665 |
> |
* CAS to null. The array is always a power of two. To ensure |
666 |
> |
* safety of Unsafe array operations, all accesses perform |
667 |
> |
* explicit null checks and implicit bounds checks via |
668 |
> |
* power-of-two masking. |
669 |
> |
* |
670 |
> |
* In addition to basic queuing support, this class contains |
671 |
> |
* fields described elsewhere to control execution. It turns out |
672 |
> |
* to work better memory-layout-wise to include them in this |
673 |
> |
* class rather than a separate class. |
674 |
> |
* |
675 |
> |
* Performance on most platforms is very sensitive to placement of |
676 |
> |
* instances of both WorkQueues and their arrays -- we absolutely |
677 |
> |
* do not want multiple WorkQueue instances or multiple queue |
678 |
> |
* arrays sharing cache lines. (It would be best for queue objects |
679 |
> |
* and their arrays to share, but there is nothing available to |
680 |
> |
* help arrange that). Unfortunately, because they are recorded |
681 |
> |
* in a common array, WorkQueue instances are often moved to be |
682 |
> |
* adjacent by garbage collectors. To reduce impact, we use field |
683 |
> |
* padding that works OK on common platforms; this effectively |
684 |
> |
* trades off slightly slower average field access for the sake of |
685 |
> |
* avoiding really bad worst-case access. (Until better JVM |
686 |
> |
* support is in place, this padding is dependent on transient |
687 |
> |
* properties of JVM field layout rules.) We also take care in |
688 |
> |
* allocating and sizing and resizing the array. Non-shared queue |
689 |
> |
* arrays are initialized (via method growArray) by workers before |
690 |
> |
* use. Others are allocated on first use. |
691 |
|
*/ |
692 |
< |
volatile boolean shutdown; |
692 |
> |
static final class WorkQueue { |
693 |
> |
/** |
694 |
> |
* Capacity of work-stealing queue array upon initialization. |
695 |
> |
* Must be a power of two; at least 4, but set larger to |
696 |
> |
* reduce cacheline sharing among queues. |
697 |
> |
*/ |
698 |
> |
static final int INITIAL_QUEUE_CAPACITY = 1 << 8; |
699 |
> |
|
700 |
> |
/** |
701 |
> |
* Maximum size for queue arrays. Must be a power of two less |
702 |
> |
* than or equal to 1 << (31 - width of array entry) to ensure |
703 |
> |
* lack of wraparound of index calculations, but defined to a |
704 |
> |
* value a bit less than this to help users trap runaway |
705 |
> |
* programs before saturating systems. |
706 |
> |
*/ |
707 |
> |
static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M |
708 |
> |
|
709 |
> |
volatile long totalSteals; // cumulative number of steals |
710 |
> |
int seed; // for random scanning; initialize nonzero |
711 |
> |
volatile int eventCount; // encoded inactivation count; < 0 if inactive |
712 |
> |
int nextWait; // encoded record of next event waiter |
713 |
> |
int rescans; // remaining scans until block |
714 |
> |
int nsteals; // top-level task executions since last idle |
715 |
> |
final int mode; // lifo, fifo, or shared |
716 |
> |
int poolIndex; // index of this queue in pool (or 0) |
717 |
> |
int stealHint; // index of most recent known stealer |
718 |
> |
volatile int runState; // 1: locked, -1: terminate; else 0 |
719 |
> |
volatile int base; // index of next slot for poll |
720 |
> |
int top; // index of next slot for push |
721 |
> |
ForkJoinTask<?>[] array; // the elements (initially unallocated) |
722 |
> |
final ForkJoinWorkerThread owner; // owning thread or null if shared |
723 |
> |
volatile Thread parker; // == owner during call to park; else null |
724 |
> |
ForkJoinTask<?> currentJoin; // task being joined in awaitJoin |
725 |
> |
ForkJoinTask<?> currentSteal; // current non-local task being executed |
726 |
> |
// Heuristic padding to ameliorate unfortunate memory placements |
727 |
> |
Object p00, p01, p02, p03, p04, p05, p06, p07, p08, p09, p0a; |
728 |
> |
|
729 |
> |
WorkQueue(ForkJoinWorkerThread owner, int mode) { |
730 |
> |
this.owner = owner; |
731 |
> |
this.mode = mode; |
732 |
> |
// Place indices in the center of the array (which is not yet allocated)
733 |
> |
base = top = INITIAL_QUEUE_CAPACITY >>> 1; |
734 |
> |
} |
735 |
> |
|
736 |
> |
/** |
737 |
> |
* Returns the number of tasks in the queue.
738 |
> |
*/ |
739 |
> |
final int queueSize() { |
740 |
> |
int n = base - top; // non-owner callers must read base first |
741 |
> |
return (n >= 0) ? 0 : -n; |
742 |
> |
} |
743 |
> |
|
744 |
> |
/** |
745 |
> |
* Pushes a task. Call only by owner in unshared queues. |
746 |
> |
* |
747 |
> |
* @param task the task. Caller must ensure non-null. |
748 |
> |
* @param p if non-null, pool to signal if necessary |
749 |
> |
* @throws RejectedExecutionException if array cannot be resized
750 |
> |
*/ |
751 |
> |
final void push(ForkJoinTask<?> task, ForkJoinPool p) { |
752 |
> |
ForkJoinTask<?>[] a; |
753 |
> |
int s = top, m, n; |
754 |
> |
if ((a = array) != null) { // ignore if queue removed |
755 |
> |
U.putOrderedObject |
756 |
> |
(a, (((m = a.length - 1) & s) << ASHIFT) + ABASE, task); |
757 |
> |
if ((n = (top = s + 1) - base) <= 2) { |
758 |
> |
if (p != null) |
759 |
> |
p.signalWork(); |
760 |
> |
} |
761 |
> |
else if (n >= m) |
762 |
> |
growArray(true); |
763 |
> |
} |
764 |
> |
} |
765 |
> |
|
766 |
> |
/** |
767 |
> |
* Pushes a task if lock is free and array is either big |
768 |
> |
* enough or can be resized to be big enough. |
769 |
> |
* |
770 |
> |
* @param task the task. Caller must ensure non-null. |
771 |
> |
* @return true if submitted |
772 |
> |
*/ |
773 |
> |
final boolean trySharedPush(ForkJoinTask<?> task) { |
774 |
> |
boolean submitted = false; |
775 |
> |
if (runState == 0 && U.compareAndSwapInt(this, RUNSTATE, 0, 1)) { |
776 |
> |
ForkJoinTask<?>[] a = array; |
777 |
> |
int s = top, n = s - base; |
778 |
> |
try { |
779 |
> |
if ((a != null && n < a.length - 1) || |
780 |
> |
(a = growArray(false)) != null) { // must presize |
781 |
> |
int j = (((a.length - 1) & s) << ASHIFT) + ABASE; |
782 |
> |
U.putObject(a, (long)j, task); // don't need "ordered" |
783 |
> |
top = s + 1; |
784 |
> |
submitted = true; |
785 |
> |
} |
786 |
> |
} finally { |
787 |
> |
runState = 0; // unlock |
788 |
> |
} |
789 |
> |
} |
790 |
> |
return submitted; |
791 |
> |
} |
792 |
> |
|
793 |
> |
/** |
794 |
> |
* Takes next task, if one exists, in FIFO order. |
795 |
> |
*/ |
796 |
> |
final ForkJoinTask<?> poll() { |
797 |
> |
ForkJoinTask<?>[] a; int b, i; |
798 |
> |
while ((b = base) - top < 0 && (a = array) != null && |
799 |
> |
(i = (a.length - 1) & b) >= 0) { |
800 |
> |
int j = (i << ASHIFT) + ABASE; |
801 |
> |
ForkJoinTask<?> t = (ForkJoinTask<?>)U.getObjectVolatile(a, j); |
802 |
> |
if (t != null && base == b && |
803 |
> |
U.compareAndSwapObject(a, j, t, null)) { |
804 |
> |
base = b + 1; |
805 |
> |
return t; |
806 |
> |
} |
807 |
> |
} |
808 |
> |
return null; |
809 |
> |
} |
810 |
> |
|
811 |
> |
/** |
812 |
> |
* Takes next task, if one exists, in LIFO order. |
813 |
> |
* Call only by owner in unshared queues. |
814 |
> |
*/ |
815 |
> |
final ForkJoinTask<?> pop() { |
816 |
> |
ForkJoinTask<?> t; int m; |
817 |
> |
ForkJoinTask<?>[] a = array; |
818 |
> |
if (a != null && (m = a.length - 1) >= 0) { |
819 |
> |
for (int s; (s = top - 1) - base >= 0;) { |
820 |
> |
int j = ((m & s) << ASHIFT) + ABASE; |
821 |
> |
if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) == null) |
822 |
> |
break; |
823 |
> |
if (U.compareAndSwapObject(a, j, t, null)) { |
824 |
> |
top = s; |
825 |
> |
return t; |
826 |
> |
} |
827 |
> |
} |
828 |
> |
} |
829 |
> |
return null; |
830 |
> |
} |
>
>         /**
>          * Takes next task, if one exists, in order specified by mode.
>          */
>         final ForkJoinTask<?> nextLocalTask() {
>             return mode == 0 ? pop() : poll();
>         }
>
>         /**
>          * Returns next task, if one exists, in order specified by mode.
>          */
>         final ForkJoinTask<?> peek() {
>             ForkJoinTask<?>[] a = array; int m;
>             if (a == null || (m = a.length - 1) < 0)
>                 return null;
>             int i = mode == 0 ? top - 1 : base;
>             int j = ((i & m) << ASHIFT) + ABASE;
>             return (ForkJoinTask<?>)U.getObjectVolatile(a, j);
>         }
>
>         /**
>          * Returns task at index b if b is current base of queue.
>          */
>         final ForkJoinTask<?> pollAt(int b) {
>             ForkJoinTask<?>[] a; int i;
>             ForkJoinTask<?> task = null;
>             if ((a = array) != null && (i = ((a.length - 1) & b)) >= 0) {
>                 int j = (i << ASHIFT) + ABASE;
>                 ForkJoinTask<?> t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
>                 if (t != null && base == b &&
>                     U.compareAndSwapObject(a, j, t, null)) {
>                     base = b + 1;
>                     task = t;
>                 }
>             }
>             return task;
>         }
>
>         /**
>          * Pops the given task only if it is at the current top.
>          */
>         final boolean tryUnpush(ForkJoinTask<?> t) {
>             ForkJoinTask<?>[] a; int s;
>             if ((a = array) != null && (s = top) != base &&
>                 U.compareAndSwapObject
>                 (a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) {
>                 top = s;
>                 return true;
>             }
>             return false;
>         }
>
>         /**
>          * Polls the given task only if it is at the current base.
>          */
>         final boolean pollFor(ForkJoinTask<?> task) {
>             ForkJoinTask<?>[] a; int b, i;
>             if ((b = base) - top < 0 && (a = array) != null &&
>                 (i = (a.length - 1) & b) >= 0) {
>                 int j = (i << ASHIFT) + ABASE;
>                 if (U.getObjectVolatile(a, j) == task && base == b &&
>                     U.compareAndSwapObject(a, j, task, null)) {
>                     base = b + 1;
>                     return true;
>                 }
>             }
>             return false;
>         }
>
>         /**
>          * If present, removes from queue and executes the given task, or
>          * any other cancelled task. Returns (true) immediately on any CAS
>          * or consistency check failure so caller can retry.
>          *
>          * @return false if no progress can be made
>          */
>         final boolean tryRemoveAndExec(ForkJoinTask<?> task) {
>             boolean removed = false, empty = true, progress = true;
>             ForkJoinTask<?>[] a; int m, s, b, n;
>             if ((a = array) != null && (m = a.length - 1) >= 0 &&
>                 (n = (s = top) - (b = base)) > 0) {
>                 for (ForkJoinTask<?> t;;) {           // traverse from s to b
>                     int j = ((--s & m) << ASHIFT) + ABASE;
>                     t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
>                     if (t == null)                    // inconsistent length
>                         break;
>                     else if (t == task) {
>                         if (s + 1 == top) {           // pop
>                             if (!U.compareAndSwapObject(a, j, task, null))
>                                 break;
>                             top = s;
>                             removed = true;
>                         }
>                         else if (base == b)           // replace with proxy
>                             removed = U.compareAndSwapObject(a, j, task,
>                                                              new EmptyTask());
>                         break;
>                     }
>                     else if (t.status >= 0)
>                         empty = false;
>                     else if (s + 1 == top) {          // pop and throw away
>                         if (U.compareAndSwapObject(a, j, t, null))
>                             top = s;
>                         break;
>                     }
>                     if (--n == 0) {
>                         if (!empty && base == b)
>                             progress = false;
>                         break;
>                     }
>                 }
>             }
>             if (removed)
>                 task.doExec();
>             return progress;
>         }
>
>         /**
>          * Initializes or doubles the capacity of array. Called either
>          * by owner or with lock held -- it is OK for base, but not
>          * top, to move while resizings are in progress.
>          *
>          * @param rejectOnFailure if true, throw exception if capacity
>          * exceeded (relayed ultimately to user); else return null.
>          */
>         final ForkJoinTask<?>[] growArray(boolean rejectOnFailure) {
>             ForkJoinTask<?>[] oldA = array;
>             int size = oldA != null ? oldA.length << 1 : INITIAL_QUEUE_CAPACITY;
>             if (size <= MAXIMUM_QUEUE_CAPACITY) {
>                 int oldMask, t, b;
>                 ForkJoinTask<?>[] a = array = new ForkJoinTask<?>[size];
>                 if (oldA != null && (oldMask = oldA.length - 1) >= 0 &&
>                     (t = top) - (b = base) > 0) {
>                     int mask = size - 1;
>                     do {
>                         ForkJoinTask<?> x;
>                         int oldj = ((b & oldMask) << ASHIFT) + ABASE;
>                         int j    = ((b & mask) << ASHIFT) + ABASE;
>                         x = (ForkJoinTask<?>)U.getObjectVolatile(oldA, oldj);
>                         if (x != null &&
>                             U.compareAndSwapObject(oldA, oldj, x, null))
>                             U.putObjectVolatile(a, j, x);
>                     } while (++b != t);
>                 }
>                 return a;
>             }
>             else if (!rejectOnFailure)
>                 return null;
>             else
>                 throw new RejectedExecutionException("Queue capacity exceeded");
>         }
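Because capacity is always a power of two, growArray can relocate an element purely by re-masking its logical index: position b never changes, only the physical slot (b & mask) does. A small sketch of just that step, with plain array stores standing in for the CAS-and-volatile-write transfer (the helper name is hypothetical):

    final class Resize {
        // Same loop bounds as growArray above; logical indices are stable.
        static Object[] doubled(Object[] oldA, int base, int top) {
            Object[] a = new Object[oldA.length << 1];
            int oldMask = oldA.length - 1, mask = a.length - 1;
            for (int b = base; b != top; ++b)
                a[b & mask] = oldA[b & oldMask];   // re-mask, don't renumber
            return a;
        }
    }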
>
>         /**
>          * Removes and cancels all known tasks, ignoring any exceptions.
>          */
>         final void cancelAll() {
>             ForkJoinTask.cancelIgnoringExceptions(currentJoin);
>             ForkJoinTask.cancelIgnoringExceptions(currentSteal);
>             for (ForkJoinTask<?> t; (t = poll()) != null; )
>                 ForkJoinTask.cancelIgnoringExceptions(t);
>         }
>
>         // Execution methods
>
>         /**
>          * Removes and runs tasks until empty, using local mode
>          * ordering.
>          */
>         final void runLocalTasks() {
>             if (base - top < 0) {
>                 for (ForkJoinTask<?> t; (t = nextLocalTask()) != null; )
>                     t.doExec();
>             }
>         }
>
>         /**
>          * Executes a top-level task and any local tasks remaining
>          * after execution.
>          *
>          * @return true unless terminating
>          */
>         final boolean runTask(ForkJoinTask<?> t) {
>             boolean alive = true;
>             if (t != null) {
>                 currentSteal = t;
>                 t.doExec();
>                 runLocalTasks();
>                 ++nsteals;
>                 currentSteal = null;
>             }
>             else if (runState < 0)            // terminating
>                 alive = false;
>             return alive;
>         }
>
>         /**
>          * Executes a non-top-level (stolen) task.
>          */
>         final void runSubtask(ForkJoinTask<?> t) {
>             if (t != null) {
>                 ForkJoinTask<?> ps = currentSteal;
>                 currentSteal = t;
>                 t.doExec();
>                 currentSteal = ps;
>             }
>         }
>
>         /**
>          * Computes next value for random probes. Scans don't require
>          * a very high quality generator, but also not a crummy one.
>          * Marsaglia xor-shift is cheap and works well enough. Note:
>          * This is manually inlined in several usages in ForkJoinPool
>          * to avoid writes inside busy scan loops.
>          */
>         final int nextSeed() {
>             int r = seed;
>             r ^= r << 13;
>             r ^= r >>> 17;
>             r ^= r << 5;
>             return seed = r;
>         }
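For reference, the xorshift step in isolation; any nonzero seed works, and the (13, 17, 5) shift triple is one of Marsaglia's full-period choices over nonzero 32-bit states:

    public class XorShiftDemo {
        public static void main(String[] args) {
            int r = (int) System.nanoTime() | 1;   // any nonzero seed
            for (int i = 0; i < 5; i++) {
                r ^= r << 13;                      // same three steps as nextSeed()
                r ^= r >>> 17;
                r ^= r << 5;
                System.out.println(r);
            }
        }
    }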
>
>         // Unsafe mechanics
>         private static final sun.misc.Unsafe U;
>         private static final long RUNSTATE;
>         private static final int ABASE;
>         private static final int ASHIFT;
>         static {
>             int s;
>             try {
>                 U = getUnsafe();
>                 Class<?> k = WorkQueue.class;
>                 Class<?> ak = ForkJoinTask[].class;
>                 RUNSTATE = U.objectFieldOffset
>                     (k.getDeclaredField("runState"));
>                 ABASE = U.arrayBaseOffset(ak);
>                 s = U.arrayIndexScale(ak);
>             } catch (Exception e) {
>                 throw new Error(e);
>             }
>             if ((s & (s-1)) != 0)
>                 throw new Error("data type scale not a power of two");
>             ASHIFT = 31 - Integer.numberOfLeadingZeros(s);
>         }
>     }
  
      /**
<      * True if use local fifo, not default lifo, for local polling
<      * Read by, and replicated by ForkJoinWorkerThreads
>      * Class for artificial tasks that are used to replace the target
>      * of local joins if they are removed from an interior queue slot
>      * in WorkQueue.tryRemoveAndExec. We don't need the proxy to
>      * actually do anything beyond having a unique identity.
       */
<     final boolean locallyFifo;
>     static final class EmptyTask extends ForkJoinTask<Void> {
>         EmptyTask() { status = ForkJoinTask.NORMAL; } // force done
>         public Void getRawResult() { return null; }
>         public void setRawResult(Void x) {}
>         public boolean exec() { return true; }
>     }
  
      /**
<      * The number of threads in ForkJoinWorkerThreads.helpQuiescePool.
<      * When non-zero, suppresses automatic shutdown when active
<      * counts become zero.
>      * Per-thread records for (typically non-FJ) threads that submit
>      * to pools. Currently holds only the pseudo-random seed / index
>      * that is used to choose submission queues in method doSubmit. In
>      * the future, this may incorporate a means to implement different
>      * task rejection and resubmission policies.
       */
<     volatile int quiescerCount;
>     static final class Submitter {
>         int seed; // seed for random submission queue selection
>
>         // Heuristic padding to ameliorate unfortunate memory placements
>         int p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pa, pb, pc, pd, pe;
>
>         Submitter() {
>             // Use identityHashCode, forced negative, for seed
>             seed = System.identityHashCode(Thread.currentThread()) | (1 << 31);
>         }
>
>         /**
>          * Computes next value for random probes. Like method
>          * WorkQueue.nextSeed, this is manually inlined in several
>          * usages to avoid writes inside busy loops.
>          */
>         final int nextSeed() {
>             int r = seed;
>             r ^= r << 13;
>             r ^= r >>> 17;
>             return seed = r ^= r << 5;
>         }
>     }
>
>     /** ThreadLocal class for Submitters */
>     static final class ThreadSubmitter extends ThreadLocal<Submitter> {
>         public Submitter initialValue() { return new Submitter(); }
>     }
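The same pattern in miniature: one mutable record per calling thread, created lazily by initialValue, so updates to the seed never contend with other threads (types and names below are illustrative, not part of this file):

    final class PerThreadSeed {
        static final class Seed {
            // identityHashCode of the constructing thread, forced negative
            int value = System.identityHashCode(Thread.currentThread()) | (1 << 31);
        }
        static final ThreadLocal<Seed> seeds = new ThreadLocal<Seed>() {
            @Override protected Seed initialValue() { return new Seed(); }
        };
        // usage: int r = seeds.get().value;  // distinct and nonzero per thread
    }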
  
      /**
<      * The number of threads blocked in join.
>      * Per-thread submission bookkeeping. Shared across all pools
>      * to reduce ThreadLocal pollution and because random motion
>      * to avoid contention in one pool is likely to hold for others.
       */
<     volatile int blockedCount;
>     static final ThreadSubmitter submitters = new ThreadSubmitter();
  
      /**
<      * Counter for worker Thread names (unrelated to their poolIndex)
>      * Top-level runloop for workers
       */
<     private volatile int nextWorkerNumber;
>     final void runWorker(ForkJoinWorkerThread wt) {
>         // Initialize queue array and seed in this thread
>         WorkQueue w = wt.workQueue;
>         w.growArray(false);
>         // Same initial hash as Submitters
>         w.seed = System.identityHashCode(Thread.currentThread()) | (1 << 31);
>
>         do {} while (w.runTask(scan(w)));
>     }
>
>     // Creating, registering and deregistering workers
  
      /**
<      * The index for the next created worker. Accessed under scanGuard.
>      * Tries to create and start a worker
       */
<     private int nextWorkerIndex;
>     private void addWorker() {
>         Throwable ex = null;
>         ForkJoinWorkerThread w = null;
>         try {
>             if ((w = factory.newThread(this)) != null) {
>                 w.start();
>                 return;
>             }
>         } catch (Throwable e) {
>             ex = e;
>         }
>         deregisterWorker(w, ex);
>     }
  
      /**
<      * SeqLock and index masking for for updates to workers array.
<      * Locked when SG_UNIT is set. Unlocking clears bit by adding
<      * SG_UNIT. Staleness of read-only operations can be checked by
<      * comparing scanGuard to value before the reads. The low 16 bits
<      * (i.e, anding with SMASK) hold (the smallest power of two
<      * covering all worker indices, minus one, and is used to avoid
<      * dealing with large numbers of null slots when the workers array
<      * is overallocated.
>      * Callback from ForkJoinWorkerThread constructor to assign a
>      * public name. This must be separate from registerWorker because
>      * it is called during the "super" constructor call in
>      * ForkJoinWorkerThread.
       */
<     volatile int scanGuard;
>     final String nextWorkerName() {
>         return workerNamePrefix.concat
>             (Integer.toString(nextWorkerNumber.addAndGet(1)));
>     }
  
<     private static final int SG_UNIT = 1 << 16;
>     /**
>      * Callback from ForkJoinWorkerThread constructor to establish and
>      * record its WorkQueue.
>      *
>      * @param wt the worker thread
>      */
>     final void registerWorker(ForkJoinWorkerThread wt) {
>         WorkQueue w = wt.workQueue;
>         ReentrantLock lock = this.lock;
>         lock.lock();
>         try {
>             int k = nextPoolIndex;
>             WorkQueue[] ws = workQueues;
>             if (ws != null) {                      // ignore on shutdown
>                 int n = ws.length;
>                 if (k < 0 || (k & 1) == 0 || k >= n || ws[k] != null) {
>                     for (k = 1; k < n && ws[k] != null; k += 2)
>                         ;                          // workers are at odd indices
>                     if (k >= n)                    // resize
>                         workQueues = ws = Arrays.copyOf(ws, n << 1);
>                 }
>                 w.poolIndex = k;
>                 w.eventCount = ~(k >>> 1) & SMASK; // Set up wait count
>                 ws[k] = w;                         // record worker
>                 nextPoolIndex = k + 2;
>                 int rs = runState;
>                 int m = rs & SMASK;                // recalculate runState mask
>                 if (k > m)
>                     m = (m << 1) + 1;
>                 runState = (rs & SHUTDOWN) | ((rs + RS_SEQ) & RS_SEQ_MASK) | m;
>             }
>         } finally {
>             lock.unlock();
>         }
>     }
  
      /**
<      * The wakeup interval (in nanoseconds) for a worker waiting for a
<      * task when the pool is quiescent to instead try to shrink the
<      * number of workers.  The exact value does not matter too
<      * much. It must be short enough to release resources during
<      * sustained periods of idleness, but not so short that threads
<      * are continually re-created.
>      * Final callback from terminating worker, as well as failure to
>      * construct or start a worker in addWorker. Removes record of
>      * worker from array, and adjusts counts. If pool is shutting
>      * down, tries to complete termination.
>      *
>      * @param wt the worker thread or null if addWorker failed
>      * @param ex the exception causing failure, or null if none
       */
<     private static final long SHRINK_RATE =
<         4L * 1000L * 1000L * 1000L; // 4 seconds
>     final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) {
>         WorkQueue w = null;
>         if (wt != null && (w = wt.workQueue) != null) {
>             w.runState = -1;                // ensure runState is set
>             stealCount.getAndAdd(w.totalSteals + w.nsteals);
>             int idx = w.poolIndex;
>             ReentrantLock lock = this.lock;
>             lock.lock();
>             try {                           // remove record from array
>                 WorkQueue[] ws = workQueues;
>                 if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w)
>                     ws[nextPoolIndex = idx] = null;
>             } finally {
>                 lock.unlock();
>             }
>         }
>
>         long c;                             // adjust ctl counts
>         do {} while (!U.compareAndSwapLong
>                      (this, CTL, c = ctl, (((c - AC_UNIT) & AC_MASK) |
>                                            ((c - TC_UNIT) & TC_MASK) |
>                                            (c & ~(AC_MASK|TC_MASK)))));
>
>         if (!tryTerminate(false) && w != null) {
>             w.cancelAll();                  // cancel remaining tasks
>             if (w.array != null)            // suppress signal if never ran
>                 signalWork();               // wake up or create replacement
>         }
>
>         if (ex != null)                     // rethrow
>             U.throwException(ex);
>     }
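The CAS loop above edits two packed 16-bit fields of ctl in one shot. A standalone sketch of that update, assuming the field layout the shifts and casts in this file imply (active count AC in bits 48-63, total count TC in bits 32-47); the constant names merely mirror those used here:

    final class CtlSketch {
        static final int  AC_SHIFT = 48, TC_SHIFT = 32;
        static final long AC_UNIT = 1L << AC_SHIFT, TC_UNIT = 1L << TC_SHIFT;
        static final long AC_MASK = 0xffffL << AC_SHIFT;
        static final long TC_MASK = 0xffffL << TC_SHIFT;

        // Decrement both counts, masking each field so one field's
        // borrow cannot leak into its neighbor -- same expression as above.
        static long decrementCounts(long c) {
            return ((c - AC_UNIT) & AC_MASK) |
                   ((c - TC_UNIT) & TC_MASK) |
                   (c & ~(AC_MASK | TC_MASK));
        }

        public static void main(String[] args) {
            long c = decrementCounts(0L);
            System.out.println(c >> AC_SHIFT);            // -1 (signed AC)
            System.out.println((short)(c >>> TC_SHIFT));  // -1 (signed TC)
            System.out.println((int)c);                   // low 32 bits intact
        }
    }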
  
      /**
<      * Top-level loop for worker threads: On each step: if the
<      * previous step swept through all queues and found no tasks, or
<      * there are excess threads, then possibly blocks. Otherwise,
<      * scans for and, if found, executes a task. Returns when pool
<      * and/or worker terminate.
>      * Tries to add and register a new queue at the given index.
       *
<      * @param w the worker
>      * @param idx the workQueues array index to register the queue
>      * @return the queue, or null if could not add because could
>      * not acquire lock or idx is unusable
       */
<     final void work(ForkJoinWorkerThread w) {
<         boolean swept = false;                // true on empty scans
<         long c;
<         while (!w.terminate && (int)(c = ctl) >= 0) {
<             int a;                            // active count
<             if (!swept && (a = (int)(c >> AC_SHIFT)) <= 0)
<                 swept = scan(w, a);
<             else if (tryAwaitWork(w, c))
<                 swept = false;
>     private WorkQueue tryAddSharedQueue(int idx) {
>         WorkQueue q = null;
>         ReentrantLock lock = this.lock;
>         if (idx >= 0 && (idx & 1) == 0 && !lock.isLocked()) {
>             // create queue outside of lock but only if apparently free
>             WorkQueue nq = new WorkQueue(null, SHARED_QUEUE);
>             if (lock.tryLock()) {
>                 try {
>                     WorkQueue[] ws = workQueues;
>                     if (ws != null && idx < ws.length) {
>                         if ((q = ws[idx]) == null) {
>                             int rs;            // update runState seq
>                             ws[idx] = q = nq;
>                             runState = (((rs = runState) & SHUTDOWN) |
>                                         ((rs + RS_SEQ) & ~SHUTDOWN));
>                         }
>                     }
>                 } finally {
>                     lock.unlock();
>                 }
>             }
          }
+         return q;
      }
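tryAddSharedQueue allocates before locking and then only tryLocks, so a submitter never blocks on queue creation; on any contention it simply returns null and the caller rehashes to another index. The shape of that pattern, reduced to essentials (all names hypothetical):

    import java.util.concurrent.locks.ReentrantLock;

    final class TryInstall {
        static final ReentrantLock lock = new ReentrantLock();
        static final Object[] slots = new Object[16];

        static Object tryInstall(int idx) {
            Object q = null;
            if (idx >= 0 && idx < slots.length && !lock.isLocked()) {
                Object nq = new Object();        // create outside of lock
                if (lock.tryLock()) {            // never block the caller
                    try {
                        if ((q = slots[idx]) == null)
                            slots[idx] = q = nq; // install, else keep existing
                    } finally {
                        lock.unlock();
                    }
                }
            }
            return q;                            // null => caller retries elsewhere
        }
    }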
  
<     // Signalling
>     // Maintaining ctl counts
>
>     /**
>      * Increments active count; mainly called upon return from blocking.
>      */
>     final void incrementActiveCount() {
>         long c;
>         do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT));
>     }
  
      /**
<      * Wakes up or creates a worker.
>      * Activates or creates a worker.
       */
      final void signalWork() {
          /*
          */
          long c; int e, u;
          while ((((e = (int)(c = ctl)) | (u = (int)(c >>> 32))) &
<                 (INT_SIGN|SHORT_SIGN)) == (INT_SIGN|SHORT_SIGN) && e >= 0) {
<             if (e > 0) {                         // release a waiting worker
<                 int i; ForkJoinWorkerThread w; ForkJoinWorkerThread[] ws;
<                 if ((ws = workers) == null ||
<                     (i = ~e & SMASK) >= ws.length ||
<                     (w = ws[i]) == null)
>                 (INT_SIGN|SHORT_SIGN)) == (INT_SIGN|SHORT_SIGN)) {
>             WorkQueue[] ws = workQueues; int i; WorkQueue w; Thread p;
>             if (e == 0) {                        // add a new worker
>                 if (U.compareAndSwapLong
>                     (this, CTL, c, (long)(((u + UTC_UNIT) & UTC_MASK) |
>                                           ((u + UAC_UNIT) & UAC_MASK)) << 32)) {
>                     addWorker();
                      break;
<             long nc = (((long)(w.nextWait & E_MASK)) |
<                        ((long)(u + UAC_UNIT) << 32));
<             if (w.eventCount == e &&
<                 UNSAFE.compareAndSwapLong(this, ctlOffset, c, nc)) {
<                 w.eventCount = (e + EC_UNIT) & E_MASK;
<                 if (w.parked)
<                     UNSAFE.unpark(w);
>                 }
>             }
>             else if (e > 0 && ws != null &&
>                      (i = ((~e << 1) | 1) & SMASK) < ws.length &&
>                      (w = ws[i]) != null &&
>                      w.eventCount == (e | INT_SIGN)) {
>                 if (U.compareAndSwapLong
>                     (this, CTL, c, (((long)(w.nextWait & E_MASK)) |
>                                     ((long)(u + UAC_UNIT) << 32)))) {
>                     w.eventCount = (e + E_SEQ) & E_MASK;
>                     if ((p = w.parker) != null)
>                         U.unpark(p);             // release a waiting worker
                      break;
                  }
              }
<             else if (UNSAFE.compareAndSwapLong
<                      (this, ctlOffset, c,
<                       (long)(((u + UTC_UNIT) & UTC_MASK) |
<                              ((u + UAC_UNIT) & UAC_MASK)) << 32)) {
<                 addWorker();
>             else
                  break;
-             }
          }
      }
  
      /**
<      * Variant of signalWork to help release waiters on rescans.
<      * Tries once to release a waiter if active count < 0.
>      * Tries to decrement active count (sometimes implicitly) and
>      * possibly release or create a compensating worker in preparation
>      * for blocking. Fails on contention or termination.
       *
<      * @return false if failed due to contention, else true
>      * @return true if the caller can block, else should recheck and retry
       */
<     private boolean tryReleaseWaiter() {
<         long c; int e, i; ForkJoinWorkerThread w; ForkJoinWorkerThread[] ws;
<         if ((e = (int)(c = ctl)) > 0 &&
<             (int)(c >> AC_SHIFT) < 0 &&
<             (ws = workers) != null &&
<             (i = ~e & SMASK) < ws.length &&
<             (w = ws[i]) != null) {
<             long nc = ((long)(w.nextWait & E_MASK) |
<                        ((c + AC_UNIT) & (AC_MASK|TC_MASK)));
<             if (w.eventCount != e ||
<                 !UNSAFE.compareAndSwapLong(this, ctlOffset, c, nc))
<                 return false;
<             w.eventCount = (e + EC_UNIT) & E_MASK;
<             if (w.parked)
<                 UNSAFE.unpark(w);
<         }
<         return true;
<     }
<
<     // Scanning for tasks
>     final boolean tryCompensate() {
>         WorkQueue[] ws; WorkQueue w; Thread p;
>         int pc = parallelism, e, u, ac, tc, i;
>         long c = ctl;
  
<     /**
<      * Scans for and, if found, executes one task. Scans start at a
<      * random index of workers array, and randomly select the first
<      * (2*#workers)-1 probes, and then, if all empty, resort to 2
<      * circular sweeps, which is necessary to check quiescence. and
<      * taking a submission only if no stealable tasks were found. The
<      * steal code inside the loop is a specialized form of
<      * ForkJoinWorkerThread.deqTask, followed bookkeeping to support
<      * helpJoinTask and signal propagation. The code for submission
<      * queues is almost identical. On each steal, the worker completes
<      * not only the task, but also all local tasks that this task may
<      * have generated. On detecting staleness or contention when
<      * trying to take a task, this method returns without finishing
<      * sweep, which allows global state rechecks before retry.
<      *
<      * @param w the worker
<      * @param a the number of active workers
<      * @return true if swept all queues without finding a task
<      */
<     private boolean scan(ForkJoinWorkerThread w, int a) {
<         int g = scanGuard; // mask 0 avoids useless scans if only one active
<         int m = parallelism == 1 - a? 0 : g & SMASK;
<         ForkJoinWorkerThread[] ws = workers;
<         if (ws == null || ws.length <= m)         // staleness check
<             return false;
<         for (int r = w.seed, k = r, j = -(m + m); j <= m + m; ++j) {
<             ForkJoinTask<?> t; ForkJoinTask<?>[] q; int b, i;
<             ForkJoinWorkerThread v = ws[k & m];
<             if (v != null && (b = v.queueBase) != v.queueTop &&
<                 (q = v.queue) != null && (i = (q.length - 1) & b) >= 0) {
<                 long u = (i << ASHIFT) + ABASE;
<                 if ((t = q[i]) != null && v.queueBase == b &&
<                     UNSAFE.compareAndSwapObject(q, u, t, null)) {
<                     int d = (v.queueBase = b + 1) - v.queueTop;
<                     v.stealHint = w.poolIndex;
<                     if (d != 0)
<                         signalWork();             // propagate if nonempty
<                     w.execTask(t);
>         if ((e = (int)c) >= 0) {
>             if ((ac = ((u = (int)(c >>> 32)) >> UAC_SHIFT)) <= 0 &&
>                 e != 0 && (ws = workQueues) != null &&
>                 (i = ((~e << 1) | 1) & SMASK) < ws.length &&
>                 (w = ws[i]) != null) {
>                 if (w.eventCount == (e | INT_SIGN) &&
>                     U.compareAndSwapLong
>                     (this, CTL, c, ((long)(w.nextWait & E_MASK) |
>                                     (c & (AC_MASK|TC_MASK))))) {
>                     w.eventCount = (e + E_SEQ) & E_MASK;
>                     if ((p = w.parker) != null)
>                         U.unpark(p);
>                     return true;    // release an idle worker
                  }
-                 r ^= r << 13; r ^= r >>> 17; w.seed = r ^ (r << 5);
-                 return false;           // store next seed
              }
<             else if (j < 0) {                     // xorshift
<                 r ^= r << 13; r ^= r >>> 17; k = r ^= r << 5;
<             }
<             else
<                 ++k;
<         }
<         if (scanGuard != g)                       // staleness check
<             return false;
<         else {                                    // try to take submission
<             ForkJoinTask<?> t; ForkJoinTask<?>[] q; int b, i;
<             if ((b = queueBase) != queueTop &&
<                 (q = submissionQueue) != null &&
<                 (i = (q.length - 1) & b) >= 0) {
<                 long u = (i << ASHIFT) + ABASE;
<                 if ((t = q[i]) != null && queueBase == b &&
<                     UNSAFE.compareAndSwapObject(q, u, t, null)) {
<                     queueBase = b + 1;
<                     w.execTask(t);
>             else if ((tc = (short)(u >>> UTC_SHIFT)) >= 0 && ac + pc > 1) {
>                 long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK);
>                 if (U.compareAndSwapLong(this, CTL, c, nc))
>                     return true;    // no compensation needed
>             }
>             else if (tc + pc < MAX_ID) {
>                 long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK);
>                 if (U.compareAndSwapLong(this, CTL, c, nc)) {
>                     addWorker();
>                     return true;    // create replacement
                  }
-                 return false;
              }
-             return true;                 // all queues empty
          }
+         return false;
      }
  
<     /**
<      * Tries to enqueue worker in wait queue and await change in
<      * worker's eventCount.  Before blocking, rescans queues to avoid
<      * missed signals.  If the pool is quiescent, possibly terminates
<      * worker upon exit.
<      *
<      * @param w the calling worker
<      * @param c the ctl value on entry
<      * @return true if waited or another thread was released upon enq
<      */
<     private boolean tryAwaitWork(ForkJoinWorkerThread w, long c) {
<         int v = w.eventCount;
<         w.nextWait = (int)c;                      // w's successor record
<         long nc = (long)(v & E_MASK) | ((c - AC_UNIT) & (AC_MASK|TC_MASK));
<         if (ctl != c || !UNSAFE.compareAndSwapLong(this, ctlOffset, c, nc)) {
<             long d = ctl; // return true if lost to a deq, to force rescan
<             return (int)d != (int)c && ((d - c) & AC_MASK) >= 0L;
<         }
<         if (parallelism + (int)(c >> AC_SHIFT) == 1 &&
<             blockedCount == 0 && quiescerCount == 0)
<             idleAwaitWork(w, v);            // quiescent -- maybe shrink
<
<         boolean rescanned = false;
<         for (int sc;;) {
<             if (w.eventCount != v)
<                 return true;
<             if ((sc = w.stealCount) != 0) {
<                 long s = stealCount;            // accumulate stealCount
<                 if (UNSAFE.compareAndSwapLong(this, stealCountOffset, s, s+sc))
<                     w.stealCount = 0;
<             }
<             else if (!rescanned) {
<                 int g = scanGuard, m = g & SMASK;
<                 ForkJoinWorkerThread[] ws = workers;
<                 if (ws != null && m < ws.length) {
<                     rescanned = true;
<                     for (int i = 0; i <= m; ++i) {
<                         ForkJoinWorkerThread u = ws[i];
<                         if (u != null) {
<                             if (u.queueBase != u.queueTop &&
<                                 !tryReleaseWaiter())
<                                 rescanned = false; // contended
<                             if (w.eventCount != v)
<                                 return true;
<                         }
<                     }
<                 }
<                 if (scanGuard != g ||              // stale
<                     (queueBase != queueTop && !tryReleaseWaiter()))
<                     rescanned = false;
<                 if (!rescanned)
<                     Thread.yield();                // reduce contention
<                 else
<                     Thread.interrupted();          // clear before park
<             }
<             else {
<                 w.parked = true;                   // must recheck
<                 if (w.eventCount != v) {
<                     w.parked = false;
<                     return true;
<                 }
<                 LockSupport.park(this);
<                 rescanned = w.parked = false;
<             }
<         }
<     }
>     // Submissions
  
      /**
<      * If pool is quiescent, checks for termination, and waits for
<      * event signal for up to SHRINK_RATE nanosecs. On timeout, if ctl
<      * has not changed, terminates the worker. Upon its termination
<      * (see deregisterWorker), it may wake up another worker to
<      * possibly repeat this process.
<      *
<      * @param w the calling worker
<      * @param v the eventCount w must wait until changed
>      * Unless shutting down, adds the given task to a submission queue
>      * at submitter's current queue index. If no queue exists at the
>      * index, one is created unless pool lock is busy. If the queue
>      * and/or lock are busy, another index is randomly chosen.
       */
<     private void idleAwaitWork(ForkJoinWorkerThread w, int v) {
<         ForkJoinTask.helpExpungeStaleExceptions(); // help clean weak refs
<         if (shutdown)
<             tryTerminate(false);
<         long c = ctl;
<         long nc = (((c & (AC_MASK|TC_MASK)) + AC_UNIT) |
<                    (long)(w.nextWait & E_MASK)); // ctl value to release w
<         if (w.eventCount == v &&
<             parallelism + (int)(c >> AC_SHIFT) == 0 &&
<             blockedCount == 0 && quiescerCount == 0) {
<             long startTime = System.nanoTime();
<             Thread.interrupted();
<             if (w.eventCount == v) {
<                 w.parked = true;
<                 if (w.eventCount == v)
<                     LockSupport.parkNanos(this, SHRINK_RATE);
<                 w.parked = false;
<                 if (w.eventCount == v && ctl == c &&
<                     System.nanoTime() - startTime >= SHRINK_RATE &&
<                     UNSAFE.compareAndSwapLong(this, ctlOffset, c, nc)) {
<                     w.terminate = true;
<                     w.eventCount = ((int)c + EC_UNIT) & E_MASK;
<                 }
>     private void doSubmit(ForkJoinTask<?> task) {
>         if (task == null)
>             throw new NullPointerException();
>         Submitter s = submitters.get();
>         for (int r = s.seed;;) {
>             WorkQueue q; int k;
>             int rs = runState, m = rs & SMASK;
>             WorkQueue[] ws = workQueues;
>             if (rs < 0 || ws == null)   // shutting down
>                 throw new RejectedExecutionException();
>             if (ws.length > m &&        // k must be at index
>                 ((q = ws[k = (r << 1) & m]) != null ||
>                  (q = tryAddSharedQueue(k)) != null) &&
>                 q.trySharedPush(task)) {
>                 signalWork();
>                 return;
              }
+             r ^= r << 13;               // xorshift seed to new position
+             r ^= r >>> 17;
+             if (((s.seed = r ^= r << 5) & m) == 0)
+                 Thread.yield();         // occasionally yield if busy
          }
      }
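doSubmit depends on the even/odd slot convention: (r << 1) & m always lands on an even index, leaving odd indices to workers, so submitters and workers never collide in the table. A tiny standalone check (mask and seed values arbitrary):

    public class IndexDemo {
        public static void main(String[] args) {
            int m = 15;                               // 16-slot table mask
            for (int r : new int[] { 0x9E3779B9, 12345, -7 })
                System.out.println(((r << 1) & m) + " (even, submitter)  " +
                                   (((r << 1) | 1) & m) + " (odd, worker)");
        }
    }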
  
<     // Submissions
>
>     // Scanning for tasks
  
      /**
<      * Enqueues the given task in the submissionQueue. Same idea as
<      * ForkJoinWorkerThread.pushTask except for use of submissionLock.
<      *
<      * @param t the task
<      */
<     private void addSubmission(ForkJoinTask<?> t) {
<         final ReentrantLock lock = this.submissionLock;
<         lock.lock();
<         try {
<             ForkJoinTask<?>[] q; int s, m;
<             if ((q = submissionQueue) != null) {    // ignore if queue removed
<                 long u = (((s = queueTop) & (m = q.length-1)) << ASHIFT)+ABASE;
<                 UNSAFE.putOrderedObject(q, u, t);
<                 queueTop = s + 1;
<                 if (s - queueBase == m)
<                     growSubmissionQueue();
>      * Scans for and, if found, returns one task, else possibly
>      * inactivates the worker. This method operates on single reads of
>      * volatile state and is designed to be re-invoked continuously in
>      * part because it returns upon detecting inconsistencies,
>      * contention, or state changes that indicate possible success on
>      * re-invocation.
>      *
>      * The scan searches for tasks across queues, randomly selecting
>      * the first #queues probes, favoring steals 2:1 over submissions
>      * (by exploiting even/odd indexing), and then performing a
>      * circular sweep of all queues. The scan terminates upon either
>      * finding a non-empty queue, or completing a full sweep. If the
>      * worker is not inactivated, it takes and returns a task from
>      * this queue. On failure to find a task, we take one of the
>      * following actions, after which the caller will retry calling
>      * this method unless terminated.
>      *
>      * * If not a complete sweep, try to release a waiting worker. If
>      * the scan terminated because the worker is inactivated, then the
>      * released worker will often be the calling worker, and it can
>      * succeed obtaining a task on the next call. Or maybe it is
>      * another worker, but with same net effect. Releasing in other
>      * cases as well ensures that we have enough workers running.
>      *
>      * * If the caller has run a task since the last empty scan,
>      * return (to allow rescan) if other workers are not also yet
>      * enqueued. Field WorkQueue.rescans counts down on each scan to
>      * ensure eventual inactivation, and occasional calls to
>      * Thread.yield to help avoid interference with more useful
>      * activities on the system.
>      *
>      * * If pool is terminating, terminate the worker.
>      *
>      * * If not already enqueued, try to inactivate and enqueue the
>      * worker on wait queue.
>      *
>      * * If already enqueued and none of the above apply, either park
>      * awaiting signal, or if this is the most recent waiter and pool
>      * is quiescent, relay to idleAwaitWork to check for termination
>      * and possibly shrink pool.
>      *
>      * @param w the worker (via its WorkQueue)
>      * @return a task, or null if none found
>      */
>     private final ForkJoinTask<?> scan(WorkQueue w) {
>         boolean swept = false;                 // true after full empty scan
>         WorkQueue[] ws;                        // volatile read order matters
>         int r = w.seed, ec = w.eventCount;     // ec is negative if inactive
>         int rs = runState, m = rs & SMASK;
>         if ((ws = workQueues) != null && ws.length > m) {
>             ForkJoinTask<?> task = null;
>             for (int k = 0, j = -2 - m; ; ++j) {
>                 WorkQueue q; int b;
>                 if (j < 0) {                   // random probes while j negative
>                     r ^= r << 13; r ^= r >>> 17; k = (r ^= r << 5) | (j & 1);
>                 }                              // worker (not submit) for odd j
>                 else                           // cyclic scan when j >= 0
>                     k += (m >>> 1) | 1;        // step by half to reduce bias
>
>                 if ((q = ws[k & m]) != null && (b = q.base) - q.top < 0) {
>                     if (ec >= 0)
>                         task = q.pollAt(b);    // steal
>                     break;
>                 }
>                 else if (j > m) {
>                     if (rs == runState)        // staleness check
>                         swept = true;
>                     break;
>                 }
>             }
>             w.seed = r;                        // save seed for next scan
>             if (task != null)
>                 return task;
>         }
>
>         // Decode ctl on empty scan
>         long c = ctl; int e = (int)c, a = (int)(c >> AC_SHIFT), nr, ns;
>         if (!swept) {                          // try to release a waiter
>             WorkQueue v; Thread p;
>             if (e > 0 && a < 0 && ws != null &&
>                 (v = ws[((~e << 1) | 1) & m]) != null &&
>                 v.eventCount == (e | INT_SIGN) && U.compareAndSwapLong
>                 (this, CTL, c, ((long)(v.nextWait & E_MASK) |
>                                 ((c + AC_UNIT) & (AC_MASK|TC_MASK))))) {
>                 v.eventCount = (e + E_SEQ) & E_MASK;
>                 if ((p = v.parker) != null)
>                     U.unpark(p);
>             }
>         }
>         else if ((nr = w.rescans) > 0) {       // continue rescanning
>             int ac = a + parallelism;
>             if ((w.rescans = (ac < nr) ? ac : nr - 1) > 0 && w.seed < 0 &&
>                 w.eventCount == ec)
>                 Thread.yield();                // 1 bit randomness for yield call
>         }
>         else if (e < 0)                        // pool is terminating
>             w.runState = -1;
>         else if (ec >= 0) {                    // try to enqueue
>             long nc = (long)ec | ((c - AC_UNIT) & (AC_MASK|TC_MASK));
>             w.nextWait = e;
>             w.eventCount = ec | INT_SIGN;      // mark as inactive
>             if (!U.compareAndSwapLong(this, CTL, c, nc))
>                 w.eventCount = ec;             // back out on CAS failure
>             else if ((ns = w.nsteals) != 0) {  // set rescans if ran task
>                 if (a <= 0)                    // ... unless too many active
>                     w.rescans = a + parallelism;
>                 w.nsteals = 0;
>                 w.totalSteals += ns;
>             }
>         }
>         else {                                 // already queued
>             if (parallelism == -a)
>                 idleAwaitWork(w);              // quiescent
>             if (w.eventCount == ec) {
>                 Thread.interrupted();          // clear status
>                 ForkJoinWorkerThread wt = w.owner;
>                 U.putObject(wt, PARKBLOCKER, this);
>                 w.parker = wt;                 // emulate LockSupport.park
>                 if (w.eventCount == ec)        // recheck
>                     U.park(false, 0L);         // block
>                 w.parker = null;
>                 U.putObject(wt, PARKBLOCKER, null);
              }
-         } finally {
-             lock.unlock();
          }
<         signalWork();
>         return null;
      }
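The probe order used by scan, reduced to a printable toy: random probes while j is negative (with odd j forcing an odd, worker-side slot), then a cyclic sweep whose stride (m >>> 1) | 1 is odd and therefore covers a power-of-two table completely:

    public class ProbeOrder {
        public static void main(String[] args) {
            int m = 7, r = 0x6b43a9b5, k = 0;    // 8-slot table; any nonzero seed
            for (int j = -2 - m; j <= m; ++j) {
                if (j < 0) {                     // random phase, as in scan()
                    r ^= r << 13; r ^= r >>> 17; k = (r ^= r << 5) | (j & 1);
                } else                           // cyclic sweep phase
                    k += (m >>> 1) | 1;          // odd stride: gcd with m+1 is 1
                System.out.println((j < 0 ? "probe " : "sweep ") + (k & m));
            }
        }
    }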
  
-     // (pollSubmission is defined below with exported methods)
-
      /**
<      * Creates or doubles submissionQueue array.
<      * Basically identical to ForkJoinWorkerThread version
>      * If inactivating worker w has caused pool to become quiescent,
>      * checks for pool termination, and, so long as this is not the
>      * only worker, waits for event for up to SHRINK_RATE nanosecs.
>      * On timeout, if ctl has not changed, terminates the worker,
>      * which will in turn wake up another worker to possibly repeat
>      * this process.
>      *
>      * @param w the calling worker
       */
<     private void growSubmissionQueue() {
<         ForkJoinTask<?>[] oldQ = submissionQueue;
<         int size = oldQ != null ? oldQ.length << 1 : INITIAL_QUEUE_CAPACITY;
<         if (size > MAXIMUM_QUEUE_CAPACITY)
<             throw new RejectedExecutionException("Queue capacity exceeded");
<         if (size < INITIAL_QUEUE_CAPACITY)
<             size = INITIAL_QUEUE_CAPACITY;
<         ForkJoinTask<?>[] q = submissionQueue = new ForkJoinTask<?>[size];
<         int mask = size - 1;
<         int top = queueTop;
<         int oldMask;
<         if (oldQ != null && (oldMask = oldQ.length - 1) >= 0) {
<             for (int b = queueBase; b != top; ++b) {
<                 long u = ((b & oldMask) << ASHIFT) + ABASE;
<                 Object x = UNSAFE.getObjectVolatile(oldQ, u);
<                 if (x != null && UNSAFE.compareAndSwapObject(oldQ, u, x, null))
<                     UNSAFE.putObjectVolatile
<                         (q, ((b & mask) << ASHIFT) + ABASE, x);
>     private void idleAwaitWork(WorkQueue w) {
>         long c; int nw, ec;
>         if (!tryTerminate(false) &&
>             (int)((c = ctl) >> AC_SHIFT) + parallelism == 0 &&
>             (ec = w.eventCount) == ((int)c | INT_SIGN) &&
>             (nw = w.nextWait) != 0) {
>             long nc = ((long)(nw & E_MASK) |  // ctl to restore on timeout
>                        ((c + AC_UNIT) & AC_MASK) | (c & TC_MASK));
>             ForkJoinTask.helpExpungeStaleExceptions(); // help clean
>             ForkJoinWorkerThread wt = w.owner;
>             while (ctl == c) {
>                 long startTime = System.nanoTime();
>                 Thread.interrupted();  // timed variant of version in scan()
>                 U.putObject(wt, PARKBLOCKER, this);
>                 w.parker = wt;
>                 if (ctl == c)
>                     U.park(false, SHRINK_RATE);
>                 w.parker = null;
>                 U.putObject(wt, PARKBLOCKER, null);
>                 if (ctl != c)
>                     break;
>                 if (System.nanoTime() - startTime >= SHRINK_TIMEOUT &&
>                     U.compareAndSwapLong(this, CTL, c, nc)) {
>                     w.runState = -1;          // shrink
>                     w.eventCount = (ec + E_SEQ) | E_MASK;
>                     break;
>                 }
              }
          }
      }
  
-     // Blocking support
-
      /**
<      * Tries to increment blockedCount, decrement active count
<      * (sometimes implicitly) and possibly release or create a
<      * compensating worker in preparation for blocking. Fails
<      * on contention or termination.
<      *
<      * @return true if the caller can block, else should recheck and retry
<      */
<     private boolean tryPreBlock() {
<         int b = blockedCount;
<         if (UNSAFE.compareAndSwapInt(this, blockedCountOffset, b, b + 1)) {
<             int pc = parallelism;
<             do {
<                 ForkJoinWorkerThread[] ws; ForkJoinWorkerThread w;
<                 int e, ac, tc, rc, i;
<                 long c = ctl;
<                 int u = (int)(c >>> 32);
<                 if ((e = (int)c) < 0) {
<                     // skip -- terminating
<                 }
<                 else if ((ac = (u >> UAC_SHIFT)) <= 0 && e != 0 &&
<                          (ws = workers) != null &&
<                          (i = ~e & SMASK) < ws.length &&
<                          (w = ws[i]) != null) {
<                     long nc = ((long)(w.nextWait & E_MASK) |
<                                (c & (AC_MASK|TC_MASK)));
<                     if (w.eventCount == e &&
<                         UNSAFE.compareAndSwapLong(this, ctlOffset, c, nc)) {
<                         w.eventCount = (e + EC_UNIT) & E_MASK;
<                         if (w.parked)
<                             UNSAFE.unpark(w);
<                         return true;     // release an idle worker
>      * Tries to locate and execute tasks for a stealer of the given
>      * task, or in turn one of its stealers. Traces currentSteal ->
>      * currentJoin links looking for a thread working on a descendant
>      * of the given task and with a non-empty queue to steal back and
>      * execute tasks from. The first call to this method upon a
>      * waiting join will often entail scanning/search (which is OK
>      * because the joiner has nothing better to do), but this method
>      * leaves hints in workers to speed up subsequent calls. The
>      * implementation is very branchy to cope with potential
>      * inconsistencies or loops encountering chains that are stale,
>      * unknown, or of length greater than MAX_HELP_DEPTH links. All
>      * of these cases are dealt with by just retrying by caller.
>      *
>      * @param joiner the joining worker
>      * @param task the task to join
>      * @return true if found or ran a task (and so is immediately retryable)
>      */
>     final boolean tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) {
>         ForkJoinTask<?> subtask;    // current target
>         boolean progress = false;
>         int depth = 0;              // current chain depth
>         int m = runState & SMASK;
>         WorkQueue[] ws = workQueues;
>
>         if (ws != null && ws.length > m && (subtask = task).status >= 0) {
>             outer: for (WorkQueue j = joiner;;) {
>                 // Try to find the stealer of subtask, by first using hint
>                 WorkQueue stealer = null;
>                 WorkQueue v = ws[j.stealHint & m];
>                 if (v != null && v.currentSteal == subtask)
>                     stealer = v;
>                 else {
>                     for (int i = 1; i <= m; i += 2) {
>                         if ((v = ws[i]) != null && v.currentSteal == subtask) {
>                             stealer = v;
>                             j.stealHint = i; // save hint
>                             break;
>                         }
                      }
+                     if (stealer == null)
+                         break;
                  }
<                 else if ((tc = (short)(u >>> UTC_SHIFT)) >= 0 && ac + pc > 1) {
<                     long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK);
<                     if (UNSAFE.compareAndSwapLong(this, ctlOffset, c, nc))
<                         return true;          // no compensation needed
<                 }
<                 else if (tc + pc < MAX_ID) {
<                     long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK);
<                     if (UNSAFE.compareAndSwapLong(this, ctlOffset, c, nc)) {
<                         addWorker();
<                         return true;          // create a replacement
>
>                 for (WorkQueue q = stealer;;) { // Try to help stealer
>                     ForkJoinTask<?> t; int b;
>                     if (task.status < 0)
>                         break outer;
>                     if ((b = q.base) - q.top < 0) {
>                         progress = true;
>                         if (subtask.status < 0)
>                             break outer;               // stale
>                         if ((t = q.pollAt(b)) != null) {
>                             stealer.stealHint = joiner.poolIndex;
>                             joiner.runSubtask(t);
>                         }
>                     }
>                     else { // empty - try to descend to find stealer's stealer
>                         ForkJoinTask<?> next = stealer.currentJoin;
>                         if (++depth == MAX_HELP_DEPTH || subtask.status < 0 ||
>                             next == null || next == subtask)
>                             break outer;  // max depth, stale, dead-end, cyclic
>                         subtask = next;
>                         j = stealer;
>                         break;
                      }
                  }
<                 // try to back out on any failure and let caller retry
<             } while (!UNSAFE.compareAndSwapInt(this, blockedCountOffset,
<                                                b = blockedCount, b - 1));
>             }
          }
<         return false;
>         return progress;
      }
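The chain-following logic above, boiled down to its guards: stop on a depth cap, a dead end, or a cycle back to the original task, and let the caller retry. The types below and the cap of 16 are illustrative only, not this file's definitions:

    public class ChainDemo {
        static final int MAX_HELP_DEPTH = 16;      // assumed small cap
        static final class Task { Task currentJoin; }

        // Count hops along currentJoin-style links with the same guards.
        static int chaseDepth(Task task) {
            int depth = 0;
            for (Task t = task; t != null; t = t.currentJoin) {
                if (++depth == MAX_HELP_DEPTH || t.currentJoin == task)
                    break;                         // too deep or cyclic: give up
            }
            return depth;
        }

        public static void main(String[] args) {
            Task a = new Task(), b = new Task();
            a.currentJoin = b;                     // a waits on b
            System.out.println(chaseDepth(a));     // 2
        }
    }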
  
      /**
<      * Decrements blockedCount and increments active count
<      */
<     private void postBlock() {
<         long c;
<         do {} while (!UNSAFE.compareAndSwapLong(this, ctlOffset,  // no mask
<                                                 c = ctl, c + AC_UNIT));
<         int b;
<         do {} while(!UNSAFE.compareAndSwapInt(this, blockedCountOffset,
<                                               b = blockedCount, b - 1));
<     }
<
<     /**
<      * Possibly blocks waiting for the given task to complete, or
<      * cancels the task if terminating. Fails to wait if contended.
>      * If task is at base of some steal queue, steals and executes it.
       *
<      * @param joinMe the task
>      * @param joiner the joining worker
>      * @param task the task
       */
<     final void tryAwaitJoin(ForkJoinTask<?> joinMe) {
<         int s;
<         Thread.interrupted(); // clear interrupts before checking termination
<         if (joinMe.status >= 0) {
<             if (tryPreBlock()) {
<                 joinMe.tryAwaitDone(0L);
<                 postBlock();
>     final void tryPollForAndExec(WorkQueue joiner, ForkJoinTask<?> task) {
>         WorkQueue[] ws;
>         int m = runState & SMASK;
>         if ((ws = workQueues) != null && ws.length > m) {
>             for (int j = 1; j <= m && task.status >= 0; j += 2) {
>                 WorkQueue q = ws[j];
>                 if (q != null && q.pollFor(task)) {
>                     joiner.runSubtask(task);
>                     break;
>                 }
              }
-             if ((ctl & STOP_BIT) != 0L)
-                 joinMe.cancelIgnoringExceptions();
          }
      }
  
      /**
<      * Possibly blocks the given worker waiting for joinMe to
<      * complete or timeout
<      *
<      * @param joinMe the task
<      * @param millis the wait time for underlying Object.wait
<      */
<     final void timedAwaitJoin(ForkJoinTask<?> joinMe, long nanos) {
<         while (joinMe.status >= 0) {
<             Thread.interrupted();
<             if ((ctl & STOP_BIT) != 0L) {
<                 joinMe.cancelIgnoringExceptions();
<                 break;
<             }
<             if (tryPreBlock()) {
<                 long last = System.nanoTime();
<                 while (joinMe.status >= 0) {
<                     long millis = TimeUnit.NANOSECONDS.toMillis(nanos);
<                     if (millis <= 0)
<                         break;
<                     joinMe.tryAwaitDone(millis);
<                     if (joinMe.status < 0)
<                         break;
<                     if ((ctl & STOP_BIT) != 0L) {
<                         joinMe.cancelIgnoringExceptions();
<                         break;
>      * Returns a non-empty steal queue, if one is found during a random,
>      * then cyclic scan, else null. This method must be retried by
>      * caller if, by the time it tries to use the queue, it is empty.
>      */
>     private WorkQueue findNonEmptyStealQueue(WorkQueue w) {
>         int r = w.seed; // Same idea as scan(), but ignoring submissions
>         for (WorkQueue[] ws;;) {
>             int m = runState & SMASK;
>             if ((ws = workQueues) == null)
>                 return null;
>             if (ws.length > m) {
>                 WorkQueue q;
>                 for (int n = m << 2, k = r, j = -n;;) {
>                     r ^= r << 13; r ^= r >>> 17; r ^= r << 5;
>                     if ((q = ws[(k | 1) & m]) != null && q.base - q.top < 0) {
>                         w.seed = r;
>                         return q;
                      }
<                     long now = System.nanoTime();
<                     nanos -= now - last;
<                     last = now;
>                     else if (j > n)
>                         return null;
>                     else
>                         k = (j++ < 0) ? r : k + ((m >>> 1) | 1);
>
                  }
-                 postBlock();
-                 break;
              }
          }
      }
  
      /**
<      * If necessary, compensates for blocker, and blocks
<      */
<     private void awaitBlocker(ManagedBlocker blocker)
<         throws InterruptedException {
<         while (!blocker.isReleasable()) {
<             if (tryPreBlock()) {
<                 try {
<                     do {} while (!blocker.isReleasable() && !blocker.block());
<                 } finally {
<                     postBlock();
>      * Runs tasks until {@code isQuiescent()}. We piggyback on
>      * active count ctl maintenance, but rather than blocking
>      * when tasks cannot be found, we rescan until all others cannot
>      * find tasks either.
>      */
>     final void helpQuiescePool(WorkQueue w) {
>         for (boolean active = true;;) {
>             w.runLocalTasks();      // exhaust local queue
>             WorkQueue q = findNonEmptyStealQueue(w);
>             if (q != null) {
>                 ForkJoinTask<?> t;
>                 if (!active) {      // re-establish active count
>                     long c;
>                     active = true;
>                     do {} while (!U.compareAndSwapLong
>                                  (this, CTL, c = ctl, c + AC_UNIT));
>                 }
>                 if ((t = q.poll()) != null)
>                     w.runSubtask(t);
>             }
>             else {
>                 long c;
>                 if (active) {       // decrement active count without queuing
>                     active = false;
>                     do {} while (!U.compareAndSwapLong
>                                  (this, CTL, c = ctl, c -= AC_UNIT));
>                 }
>                 else
>                     c = ctl;        // re-increment on exit
>                 if ((int)(c >> AC_SHIFT) + parallelism == 0) {
>                     do {} while (!U.compareAndSwapLong
>                                  (this, CTL, c = ctl, c + AC_UNIT));
>                     break;
                  }
-                 break;
              }
          }
      }
  
-     // Creating, registering and deregistring workers
-
      /**
<      * Tries to create and start a worker; minimally rolls back counts
<      * on failure.
>      * Gets and removes a local or stolen task for the given worker.
>      *
>      * @return a task, if available
       */
<     private void addWorker() {
<         Throwable ex = null;
<         ForkJoinWorkerThread t = null;
<         try {
<             t = factory.newThread(this);
<         } catch (Throwable e) {
<             ex = e;
<         }
<         if (t == null) {  // null or exceptional factory return
<             long c;       // adjust counts
<             do {} while (!UNSAFE.compareAndSwapLong
<                          (this, ctlOffset, c = ctl,
<                           (((c - AC_UNIT) & AC_MASK) |
<                            ((c - TC_UNIT) & TC_MASK) |
<                            (c & ~(AC_MASK|TC_MASK)))));
<             // Propagate exception if originating from an external caller
<             if (!tryTerminate(false) && ex != null &&
<                 !(Thread.currentThread() instanceof ForkJoinWorkerThread))
<                 UNSAFE.throwException(ex);
>     final ForkJoinTask<?> nextTaskFor(WorkQueue w) {
>         for (ForkJoinTask<?> t;;) {
>             WorkQueue q;
>             if ((t = w.nextLocalTask()) != null)
>                 return t;
>             if ((q = findNonEmptyStealQueue(w)) == null)
>                 return null;
>             if ((t = q.poll()) != null)
>                 return t;
          }
-         else
-             t.start();
      }
  
      /**
<      * Callback from ForkJoinWorkerThread constructor to assign a
<      * public name
>      * Returns the approximate (non-atomic) number of idle threads per
>      * active thread to offset steal queue size for method
>      * ForkJoinTask.getSurplusQueuedTaskCount().
       */
<     final String nextWorkerName() {
<         for (int n;;) {
<             if (UNSAFE.compareAndSwapInt(this, nextWorkerNumberOffset,
<                                          n = nextWorkerNumber, ++n))
<                 return workerNamePrefix + n;
<         }
>     final int idlePerActive() {
>         // Approximate at powers of two for small values, saturate past 4
>         int p = parallelism;
>         int a = p + (int)(ctl >> AC_SHIFT);
>         return (a > (p >>>= 1) ? 0 :
>                 a > (p >>>= 1) ? 1 :
>                 a > (p >>>= 1) ? 2 :
>                 a > (p >>>= 1) ? 4 :
>                 8);
      }
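The cascade in idlePerActive can be checked standalone; as the active count a falls relative to parallelism p it returns 0, 1, 2, 4, then saturates at 8:

    public class IdlePerActiveDemo {
        static int idlePerActive(int p, int a) {   // same cascade as above
            return (a > (p >>>= 1) ? 0 :
                    a > (p >>>= 1) ? 1 :
                    a > (p >>>= 1) ? 2 :
                    a > (p >>>= 1) ? 4 :
                    8);
        }
        public static void main(String[] args) {
            System.out.println(idlePerActive(8, 8)); // 0: everyone active
            System.out.println(idlePerActive(8, 3)); // 1
            System.out.println(idlePerActive(8, 2)); // 2
            System.out.println(idlePerActive(8, 1)); // 4
            System.out.println(idlePerActive(8, 0)); // 8: fully idle
        }
    }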
  
<     /**
<      * Callback from ForkJoinWorkerThread constructor to
<      * determine its poolIndex and record in workers array.
<      *
<      * @param w the worker
<      * @return the worker's pool index
<      */
<     final int registerWorker(ForkJoinWorkerThread w) {
<         /*
<          * In the typical case, a new worker acquires the lock, uses
<          * next available index and returns quickly.  Since we should
<          * not block callers (ultimately from signalWork or
<          * tryPreBlock) waiting for the lock needed to do this, we
<          * instead help release other workers while waiting for the
<          * lock.
<          */
<         for (int g;;) {
<             ForkJoinWorkerThread[] ws;
<             if (((g = scanGuard) & SG_UNIT) == 0 &&
<                 UNSAFE.compareAndSwapInt(this, scanGuardOffset,
<                                          g, g | SG_UNIT)) {
<                 int k = nextWorkerIndex;
<                 try {
<                     if ((ws = workers) != null) { // ignore on shutdown
<                         int n = ws.length;
<                         if (k < 0 || k >= n || ws[k] != null) {
<                             for (k = 0; k < n && ws[k] != null; ++k)
<                                 ;
<                             if (k == n)
<                                 ws = workers = Arrays.copyOf(ws, n << 1);
<                         }
<                         ws[k] = w;
<                         nextWorkerIndex = k + 1;
<                         int m = g & SMASK;
<                         g = k >= m? ((m << 1) + 1) & SMASK : g + (SG_UNIT<<1);
<                     }
<                 } finally {
<                     scanGuard = g;
<                 }
<                 return k;
<             }
<             else if ((ws = workers) != null) { // help release others
<                 for (ForkJoinWorkerThread u : ws) {
<                     if (u != null && u.queueBase != u.queueTop) {
<                         if (tryReleaseWaiter())
<                             break;
<                     }
<                 }
<             }
<         }
<     }
>     // Termination
  
      /**
<      * Final callback from terminating worker. Removes record of
<      * worker from array, and adjusts counts. If pool is shutting
<      * down, tries to complete termination.
<      *
<      * @param w the worker
>      * Sets SHUTDOWN bit of runState under lock
       */
<     final void deregisterWorker(ForkJoinWorkerThread w, Throwable ex) {
<         int idx = w.poolIndex;
<         int sc = w.stealCount;
<         int steps = 0;
<         // Remove from array, adjust worker counts and collect steal count.
<         // We can intermix failed removes or adjusts with steal updates
<         do {
<             long s, c;
<             int g;
<             if (steps == 0 && ((g = scanGuard) & SG_UNIT) == 0 &&
<                 UNSAFE.compareAndSwapInt(this, scanGuardOffset,
<                                          g, g |= SG_UNIT)) {
<                 ForkJoinWorkerThread[] ws = workers;
<                 if (ws != null && idx >= 0 &&
<                     idx < ws.length && ws[idx] == w)
<                     ws[idx] = null;    // verify
<                 nextWorkerIndex = idx;
<                 scanGuard = g + SG_UNIT;
<                 steps = 1;
<             }
<             if (steps == 1 &&
<                 UNSAFE.compareAndSwapLong(this, ctlOffset, c = ctl,
<                                           (((c - AC_UNIT) & AC_MASK) |
<                                            ((c - TC_UNIT) & TC_MASK) |
<                                            (c & ~(AC_MASK|TC_MASK)))))
<                 steps = 2;
<             if (sc != 0 &&
<                 UNSAFE.compareAndSwapLong(this, stealCountOffset,
<                                           s = stealCount, s + sc))
<                 sc = 0;
<         } while (steps != 2 || sc != 0);
<         if (!tryTerminate(false)) {
<             if (ex != null)   // possibly replace if died abnormally
<                 signalWork();
<             else
<                 tryReleaseWaiter();
>     private void enableShutdown() {
>         ReentrantLock lock = this.lock;
>         if (runState >= 0) {
>             lock.lock();    // don't need try/finally
>             runState |= SHUTDOWN;
>             lock.unlock();
          }
      }
1794 |
|
|
1194 |
– |
// Shutdown and termination |
1195 |
– |
|
1795 |
|
/** |
1796 |
< |
* Possibly initiates and/or completes termination. |
1796 |
> |
* Possibly initiates and/or completes termination. Upon |
1797 |
> |
* termination, cancels all queued tasks and then |
1798 |
|
* |
1799 |
|
* @param now if true, unconditionally terminate, else only |
1800 |
< |
* if shutdown and empty queue and no active workers |
1800 |
> |
* if no work and no active workers |
1801 |
|
* @return true if now terminating or terminated |
1802 |
|
*/ |
1803 |
|
private boolean tryTerminate(boolean now) { |
1804 |
< |
long c; |
1805 |
< |
while (((c = ctl) & STOP_BIT) == 0) { |
1804 |
> |
for (long c;;) { |
1805 |
> |
if (((c = ctl) & STOP_BIT) != 0) { // already terminating |
1806 |
> |
if ((short)(c >>> TC_SHIFT) == -parallelism) { |
1807 |
> |
ReentrantLock lock = this.lock; // signal when no workers |
1808 |
> |
lock.lock(); // don't need try/finally |
1809 |
> |
termination.signalAll(); // signal when 0 workers |
1810 |
> |
lock.unlock(); |
1811 |
> |
} |
1812 |
> |
return true; |
1813 |
> |
} |
1814 |
|
if (!now) { |
1815 |
< |
if ((int)(c >> AC_SHIFT) != -parallelism) |
1815 |
> |
if ((int)(c >> AC_SHIFT) != -parallelism || runState >= 0 || |
1816 |
> |
hasQueuedSubmissions()) |
1817 |
|
return false; |
1818 |
< |
if (!shutdown || blockedCount != 0 || quiescerCount != 0 || |
1819 |
< |
queueTop - queueBase > 0) { |
1820 |
< |
if (ctl == c) // staleness check |
1821 |
< |
return false; |
1822 |
< |
continue; |
1818 |
> |
// Check for unqueued inactive workers. One pass suffices. |
1819 |
> |
WorkQueue[] ws = workQueues; WorkQueue w; |
1820 |
> |
if (ws != null) { |
1821 |
> |
int n = ws.length; |
1822 |
> |
for (int i = 1; i < n; i += 2) { |
1823 |
> |
if ((w = ws[i]) != null && w.eventCount >= 0) |
1824 |
> |
return false; |
1825 |
> |
} |
1826 |
|
} |
1827 |
|
} |
1828 |
< |
if (UNSAFE.compareAndSwapLong(this, ctlOffset, c, c | STOP_BIT)) |
1828 |
> |
if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) |
1829 |
|
startTerminating(); |
1830 |
|
} |
1219 |
– |
if ((short)(c >>> TC_SHIFT) == -parallelism) { |
1220 |
– |
submissionLock.lock(); |
1221 |
– |
termination.signalAll(); |
1222 |
– |
submissionLock.unlock(); |
1223 |
– |
} |
1224 |
– |
return true; |
1831 |
|
} |
1832 |
|
|
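Note the shape of the initiation step: STOP_BIT is turned on with a single CAS on ctl, so exactly one caller runs startTerminating and every later caller merely observes the bit. The idiom reduced to its essentials, with AtomicLong standing in for the Unsafe-based CAS and a simplified, assumed bit layout:

    import java.util.concurrent.atomic.AtomicLong;

    final class StopWord {
        static final long STOP = 1L << 63;     // sketch layout, not the pool's
        final AtomicLong ctl = new AtomicLong();

        // Returns true only for the single caller that flips the bit on.
        boolean tryStop() {
            for (long c;;) {
                if (((c = ctl.get()) & STOP) != 0)
                    return false;              // already terminating
                if (ctl.compareAndSet(c, c | STOP))
                    return true;               // this caller initiates shutdown
            }
        }
    }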
1833 |
|
/** |
1834 |
< |
* Runs up to three passes through workers: (0) Setting |
1835 |
< |
* termination status for each worker, followed by wakeups up |
1836 |
< |
* queued workers (1) helping cancel tasks (2) interrupting |
1837 |
< |
* lagging threads (likely in external tasks, but possibly also |
1838 |
< |
* blocked in joins). Each pass repeats previous steps because of |
1839 |
< |
* potential lagging thread creation. |
1834 |
> |
* Initiates termination: Runs three passes through workQueues: |
1835 |
> |
* (0) Setting termination status, followed by wakeups of queued |
1836 |
> |
* workers; (1) cancelling all tasks; (2) interrupting lagging |
1837 |
> |
* threads (likely in external tasks, but possibly also blocked in |
1838 |
> |
* joins). Each pass repeats previous steps because of potential |
1839 |
> |
* lagging thread creation. |
1840 |
|
*/ |
1841 |
|
private void startTerminating() { |
1236 |
– |
cancelSubmissions(); |
1842 |
|
for (int pass = 0; pass < 3; ++pass) { |
1843 |
< |
ForkJoinWorkerThread[] ws = workers; |
1843 |
> |
WorkQueue[] ws = workQueues; |
1844 |
|
if (ws != null) { |
1845 |
< |
for (ForkJoinWorkerThread w : ws) { |
1846 |
< |
if (w != null) { |
1847 |
< |
w.terminate = true; |
1845 |
> |
WorkQueue w; Thread wt; |
1846 |
> |
int n = ws.length; |
1847 |
> |
for (int j = 0; j < n; ++j) { |
1848 |
> |
if ((w = ws[j]) != null) { |
1849 |
> |
w.runState = -1; |
1850 |
|
if (pass > 0) { |
1851 |
< |
w.cancelTasks(); |
1852 |
< |
if (pass > 1 && !w.isInterrupted()) { |
1851 |
> |
w.cancelAll(); |
1852 |
> |
if (pass > 1 && (wt = w.owner) != null && |
1853 |
> |
!wt.isInterrupted()) { |
1854 |
|
try { |
1855 |
< |
w.interrupt(); |
1855 |
> |
wt.interrupt(); |
1856 |
|
} catch (SecurityException ignore) { |
1857 |
|
} |
1858 |
|
} |
1859 |
|
} |
1860 |
|
} |
1861 |
|
} |
1862 |
< |
terminateWaiters(); |
1863 |
< |
} |
1864 |
< |
} |
1865 |
< |
} |
1866 |
< |
|
1867 |
< |
/** |
1868 |
< |
* Polls and cancels all submissions. Called only during termination. |
1869 |
< |
*/ |
1870 |
< |
private void cancelSubmissions() { |
1871 |
< |
while (queueBase != queueTop) { |
1872 |
< |
ForkJoinTask<?> task = pollSubmission(); |
1873 |
< |
if (task != null) { |
1874 |
< |
try { |
1267 |
< |
task.cancel(false); |
1268 |
< |
} catch (Throwable ignore) { |
1269 |
< |
} |
1270 |
< |
} |
1271 |
< |
} |
1272 |
< |
} |
1273 |
< |
|
1274 |
< |
/** |
1275 |
< |
* Tries to set the termination status of waiting workers, and |
1276 |
< |
* then wake them up (after which they will terminate). |
1277 |
< |
*/ |
1278 |
< |
private void terminateWaiters() { |
1279 |
< |
ForkJoinWorkerThread[] ws = workers; |
1280 |
< |
if (ws != null) { |
1281 |
< |
ForkJoinWorkerThread w; long c; int i, e; |
1282 |
< |
int n = ws.length; |
1283 |
< |
while ((i = ~(e = (int)(c = ctl)) & SMASK) < n && |
1284 |
< |
(w = ws[i]) != null && w.eventCount == (e & E_MASK)) { |
1285 |
< |
if (UNSAFE.compareAndSwapLong(this, ctlOffset, c, |
1286 |
< |
(long)(w.nextWait & E_MASK) | |
1287 |
< |
((c + AC_UNIT) & AC_MASK) | |
1288 |
< |
(c & (TC_MASK|STOP_BIT)))) { |
1289 |
< |
w.terminate = true; |
1290 |
< |
w.eventCount = e + EC_UNIT; |
1291 |
< |
if (w.parked) |
1292 |
< |
UNSAFE.unpark(w); |
1862 |
> |
// Wake up workers parked on event queue |
1863 |
> |
int i, e; long c; Thread p; |
1864 |
> |
while ((i = ((~(e = (int)(c = ctl)) << 1) | 1) & SMASK) < n && |
1865 |
> |
(w = ws[i]) != null && |
1866 |
> |
w.eventCount == (e | INT_SIGN)) { |
1867 |
> |
long nc = ((long)(w.nextWait & E_MASK) | |
1868 |
> |
((c + AC_UNIT) & AC_MASK) | |
1869 |
> |
(c & (TC_MASK|STOP_BIT))); |
1870 |
> |
if (U.compareAndSwapLong(this, CTL, c, nc)) { |
1871 |
> |
w.eventCount = (e + E_SEQ) & E_MASK; |
1872 |
> |
if ((p = w.parker) != null) |
1873 |
> |
U.unpark(p); |
1874 |
> |
} |
1875 |
|
} |
1876 |
|
} |
1877 |
|
} |
1878 |
|
} |
1879 |
|
|
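The wake-up pass at the end relies on the standard parker handshake visible above: each waiter publishes itself in a parker field and parks, while a releaser changes the state being waited on and unparks whatever thread is recorded. A minimal version of that handshake (field names illustrative):

    import java.util.concurrent.locks.LockSupport;

    final class Event {
        volatile boolean signalled;
        volatile Thread parker;            // published by the waiting thread

        void await() {
            parker = Thread.currentThread();
            while (!signalled)             // park may return spuriously; re-check
                LockSupport.park(this);
            parker = null;
        }

        void signal() {
            signalled = true;              // state change first
            Thread p = parker;
            if (p != null)
                LockSupport.unpark(p);     // harmless if p already left
        }
    }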
1298 |
– |
// misc ForkJoinWorkerThread support |
1299 |
– |
|
1300 |
– |
/** |
1301 |
– |
* Increment or decrement quiescerCount. Needed only to prevent |
1302 |
– |
* triggering shutdown if a worker is transiently inactive while |
1303 |
– |
* checking quiescence. |
1304 |
– |
* |
1305 |
– |
* @param delta 1 for increment, -1 for decrement |
1306 |
– |
*/ |
1307 |
– |
final void addQuiescerCount(int delta) { |
1308 |
– |
int c; |
1309 |
– |
do {} while(!UNSAFE.compareAndSwapInt(this, quiescerCountOffset, |
1310 |
– |
c = quiescerCount, c + delta)); |
1311 |
– |
} |
1312 |
– |
|
1313 |
– |
/** |
1314 |
– |
* Directly increment or decrement active count without |
1315 |
– |
* queuing. This method is used to transiently assert inactivation |
1316 |
– |
* while checking quiescence. |
1317 |
– |
* |
1318 |
– |
* @param delta 1 for increment, -1 for decrement |
1319 |
– |
*/ |
1320 |
– |
final void addActiveCount(int delta) { |
1321 |
– |
long d = delta < 0 ? -AC_UNIT : AC_UNIT; |
1322 |
– |
long c; |
1323 |
– |
do {} while (!UNSAFE.compareAndSwapLong(this, ctlOffset, c = ctl, |
1324 |
– |
((c + d) & AC_MASK) | |
1325 |
– |
(c & ~AC_MASK))); |
1326 |
– |
} |
1327 |
– |
|
1328 |
– |
/** |
1329 |
– |
* Returns the approximate (non-atomic) number of idle threads per |
1330 |
– |
* active thread. |
1331 |
– |
*/ |
1332 |
– |
final int idlePerActive() { |
1333 |
– |
// Approximate at powers of two for small values, saturate past 4 |
1334 |
– |
int p = parallelism; |
1335 |
– |
int a = p + (int)(ctl >> AC_SHIFT); |
1336 |
– |
return (a > (p >>>= 1) ? 0 : |
1337 |
– |
a > (p >>>= 1) ? 1 : |
1338 |
– |
a > (p >>>= 1) ? 2 : |
1339 |
– |
a > (p >>>= 1) ? 4 : |
1340 |
– |
8); |
1341 |
– |
} |
1342 |
– |
|
1880 |
|
// Exported methods |
1881 |
|
|
1882 |
|
// Constructors |
1951 |
|
this.parallelism = parallelism; |
1952 |
|
this.factory = factory; |
1953 |
|
this.ueh = handler; |
1954 |
< |
this.locallyFifo = asyncMode; |
1954 |
> |
this.localMode = asyncMode ? FIFO_QUEUE : LIFO_QUEUE; |
1955 |
> |
this.nextPoolIndex = 1; |
1956 |
|
long np = (long)(-parallelism); // offset ctl counts |
1957 |
|
this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK); |
1958 |
< |
this.submissionQueue = new ForkJoinTask<?>[INITIAL_QUEUE_CAPACITY]; |
1421 |
< |
// initialize workers array with room for 2*parallelism if possible |
1958 |
> |
// initialize workQueues array with room for 2*parallelism if possible |
1959 |
|
int n = parallelism << 1; |
1960 |
|
if (n >= MAX_ID) |
1961 |
|
n = MAX_ID; |
1962 |
|
else { // See Hackers Delight, sec 3.2, where n < (1 << 16) |
1963 |
|
n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; |
1964 |
|
} |
1965 |
< |
workers = new ForkJoinWorkerThread[n + 1]; |
1966 |
< |
this.submissionLock = new ReentrantLock(); |
1967 |
< |
this.termination = submissionLock.newCondition(); |
1965 |
> |
this.workQueues = new WorkQueue[(n + 1) << 1]; |
1966 |
> |
ReentrantLock lck = this.lock = new ReentrantLock(); |
1967 |
> |
this.termination = lck.newCondition(); |
1968 |
> |
this.stealCount = new AtomicLong(); |
1969 |
> |
this.nextWorkerNumber = new AtomicInteger(); |
1970 |
|
StringBuilder sb = new StringBuilder("ForkJoinPool-"); |
1971 |
|
sb.append(poolNumberGenerator.incrementAndGet()); |
1972 |
|
sb.append("-worker-"); |
1973 |
|
this.workerNamePrefix = sb.toString(); |
1974 |
+ |
// Create initial submission queue |
1975 |
+ |
WorkQueue sq = tryAddSharedQueue(0); |
1976 |
+ |
if (sq != null) |
1977 |
+ |
sq.growArray(false); |
1978 |
|
} |
1979 |
|
|
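The bit-smearing in the constructor (per Hackers Delight, sec 3.2) rounds 2*parallelism up to one less than a power of two, so the workQueues length computed from n + 1 is power-of-two sized and cheap to index by mask. The trick in isolation:

    // For n < (1 << 16): sets all bits below the highest set bit,
    // so n becomes 2^k - 1 and n + 1 a power of two.
    static int smear(int n) {
        n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8;
        return n;
    }
    // e.g. parallelism 6 gives n = 12, smear(12) == 15,
    // and the table gets (15 + 1) << 1 == 32 slots.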
1980 |
|
// Execution methods |
1996 |
|
* scheduled for execution |
1997 |
|
*/ |
1998 |
|
public <T> T invoke(ForkJoinTask<T> task) { |
1999 |
< |
Thread t = Thread.currentThread(); |
2000 |
< |
if (task == null) |
1458 |
< |
throw new NullPointerException(); |
1459 |
< |
if (shutdown) |
1460 |
< |
throw new RejectedExecutionException(); |
1461 |
< |
if ((t instanceof ForkJoinWorkerThread) && |
1462 |
< |
((ForkJoinWorkerThread)t).pool == this) |
1463 |
< |
return task.invoke(); // bypass submit if in same pool |
1464 |
< |
else { |
1465 |
< |
addSubmission(task); |
1466 |
< |
return task.join(); |
1467 |
< |
} |
1468 |
< |
} |
1469 |
< |
|
1470 |
< |
/** |
1471 |
< |
* Unless terminating, forks task if within an ongoing FJ |
1472 |
< |
* computation in the current pool, else submits as external task. |
1473 |
< |
*/ |
1474 |
< |
private <T> void forkOrSubmit(ForkJoinTask<T> task) { |
1475 |
< |
ForkJoinWorkerThread w; |
1476 |
< |
Thread t = Thread.currentThread(); |
1477 |
< |
if (shutdown) |
1478 |
< |
throw new RejectedExecutionException(); |
1479 |
< |
if ((t instanceof ForkJoinWorkerThread) && |
1480 |
< |
(w = (ForkJoinWorkerThread)t).pool == this) |
1481 |
< |
w.pushTask(task); |
1482 |
< |
else |
1483 |
< |
addSubmission(task); |
1999 |
> |
doSubmit(task); |
2000 |
> |
return task.join(); |
2001 |
|
} |
2002 |
|
|
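From the caller's side, invoke is now just doSubmit followed by join, so a client thread submits and then waits for the root result. A usage sketch against the public API (the task below is an ordinary RecursiveTask, nothing pool-internal; the platform java.util.concurrent copy of the API works identically):

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.RecursiveTask;

    // Sums [lo, hi) by splitting until ranges are small.
    class SumTo extends RecursiveTask<Long> {
        final long lo, hi;
        SumTo(long lo, long hi) { this.lo = lo; this.hi = hi; }
        protected Long compute() {
            if (hi - lo <= 1000) {
                long s = 0;
                for (long i = lo; i < hi; ++i) s += i;
                return s;
            }
            long mid = (lo + hi) >>> 1;
            SumTo left = new SumTo(lo, mid);
            left.fork();                                  // subtask in the pool
            return new SumTo(mid, hi).compute() + left.join();
        }
    }
    // long sum = new ForkJoinPool().invoke(new SumTo(0, 1000000));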
2003 |
|
/** |
2009 |
|
* scheduled for execution |
2010 |
|
*/ |
2011 |
|
public void execute(ForkJoinTask<?> task) { |
2012 |
< |
if (task == null) |
1496 |
< |
throw new NullPointerException(); |
1497 |
< |
forkOrSubmit(task); |
2012 |
> |
doSubmit(task); |
2013 |
|
} |
2014 |
|
|
2015 |
|
// AbstractExecutorService methods |
2027 |
|
job = (ForkJoinTask<?>) task; |
2028 |
|
else |
2029 |
|
job = ForkJoinTask.adapt(task, null); |
2030 |
< |
forkOrSubmit(job); |
2030 |
> |
doSubmit(job); |
2031 |
|
} |
2032 |
|
|
2033 |
|
/** |
2040 |
|
* scheduled for execution |
2041 |
|
*/ |
2042 |
|
public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) { |
2043 |
< |
if (task == null) |
1529 |
< |
throw new NullPointerException(); |
1530 |
< |
forkOrSubmit(task); |
2043 |
> |
doSubmit(task); |
2044 |
|
return task; |
2045 |
|
} |
2046 |
|
|
2053 |
|
if (task == null) |
2054 |
|
throw new NullPointerException(); |
2055 |
|
ForkJoinTask<T> job = ForkJoinTask.adapt(task); |
2056 |
< |
forkOrSubmit(job); |
2056 |
> |
doSubmit(job); |
2057 |
|
return job; |
2058 |
|
} |
2059 |
|
|
2066 |
|
if (task == null) |
2067 |
|
throw new NullPointerException(); |
2068 |
|
ForkJoinTask<T> job = ForkJoinTask.adapt(task, result); |
2069 |
< |
forkOrSubmit(job); |
2069 |
> |
doSubmit(job); |
2070 |
|
return job; |
2071 |
|
} |
2072 |
|
|
2083 |
|
job = (ForkJoinTask<?>) task; |
2084 |
|
else |
2085 |
|
job = ForkJoinTask.adapt(task, null); |
2086 |
< |
forkOrSubmit(job); |
2086 |
> |
doSubmit(job); |
2087 |
|
return job; |
2088 |
|
} |
2089 |
|
|
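All of the Runnable and Callable overloads above funnel through ForkJoinTask.adapt before the shared doSubmit, so plain executor-style work rides the same path as ForkJoinTasks. Typical client usage, public API only:

    import java.util.concurrent.Callable;
    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.ForkJoinTask;

    class AdaptDemo {
        public static void main(String[] args) {
            ForkJoinPool pool = new ForkJoinPool();
            ForkJoinTask<Integer> t = pool.submit(new Callable<Integer>() {
                public Integer call() { return 6 * 7; }   // adapted internally
            });
            System.out.println(t.join());                 // prints 42
            pool.shutdown();
        }
    }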
2160 |
|
* @return {@code true} if this pool uses async mode |
2161 |
|
*/ |
2162 |
|
public boolean getAsyncMode() { |
2163 |
< |
return locallyFifo; |
2163 |
> |
return localMode != 0; |
2164 |
|
} |
2165 |
|
|
2166 |
|
/** |
2172 |
|
* @return the number of worker threads |
2173 |
|
*/ |
2174 |
|
public int getRunningThreadCount() { |
2175 |
< |
int r = parallelism + (int)(ctl >> AC_SHIFT); |
2176 |
< |
return r <= 0? 0 : r; // suppress momentarily negative values |
2175 |
> |
int rc = 0; |
2176 |
> |
WorkQueue[] ws; WorkQueue w; |
2177 |
> |
if ((ws = workQueues) != null) { |
2178 |
> |
int n = ws.length; |
2179 |
> |
for (int i = 1; i < n; i += 2) { |
2180 |
> |
Thread.State s; ForkJoinWorkerThread wt; |
2181 |
> |
if ((w = ws[i]) != null && (wt = w.owner) != null && |
2182 |
> |
w.eventCount >= 0 && |
2183 |
> |
(s = wt.getState()) != Thread.State.BLOCKED && |
2184 |
> |
s != Thread.State.WAITING && |
2185 |
> |
s != Thread.State.TIMED_WAITING) |
2186 |
> |
++rc; |
2187 |
> |
} |
2188 |
> |
} |
2189 |
> |
return rc; |
2190 |
|
} |
2191 |
|
|
2192 |
|
/** |
2197 |
|
* @return the number of active threads |
2198 |
|
*/ |
2199 |
|
public int getActiveThreadCount() { |
2200 |
< |
int r = parallelism + (int)(ctl >> AC_SHIFT) + blockedCount; |
2201 |
< |
return r <= 0? 0 : r; // suppress momentarily negative values |
2200 |
> |
int r = parallelism + (int)(ctl >> AC_SHIFT); |
2201 |
> |
return (r <= 0) ? 0 : r; // suppress momentarily negative values |
2202 |
|
} |
2203 |
|
|
2204 |
|
/** |
2213 |
|
* @return {@code true} if all threads are currently idle |
2214 |
|
*/ |
2215 |
|
public boolean isQuiescent() { |
2216 |
< |
return parallelism + (int)(ctl >> AC_SHIFT) + blockedCount == 0; |
2216 |
> |
return (int)(ctl >> AC_SHIFT) + parallelism == 0; |
2217 |
|
} |
2218 |
|
|
2219 |
|
/** |
2228 |
|
* @return the number of steals |
2229 |
|
*/ |
2230 |
|
public long getStealCount() { |
2231 |
< |
return stealCount; |
2231 |
> |
long count = stealCount.get(); |
2232 |
> |
WorkQueue[] ws; WorkQueue w; |
2233 |
> |
if ((ws = workQueues) != null) { |
2234 |
> |
int n = ws.length; |
2235 |
> |
for (int i = 1; i < n; i += 2) { |
2236 |
> |
if ((w = ws[i]) != null) |
2237 |
> |
count += w.totalSteals; |
2238 |
> |
} |
2239 |
> |
} |
2240 |
> |
return count; |
2241 |
|
} |
2242 |
|
|
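getStealCount is a striped read: a base AtomicLong plus per-queue totals summed at call time, trading an exact snapshot for uncontended single-writer updates (that the base absorbs counts from exited workers is an assumption here, not shown in this hunk). The counter's shape, reduced:

    import java.util.concurrent.atomic.AtomicLong;

    final class StripedCount {
        final AtomicLong base = new AtomicLong(); // shared, rarely updated
        final long[] cells;                       // one slot per worker
        StripedCount(int workers) { cells = new long[workers]; }

        void add(int worker) { ++cells[worker]; } // only the owner writes

        long sum() {                              // approximate, like getStealCount()
            long s = base.get();
            for (long c : cells) s += c;
            return s;
        }
    }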
2243 |
|
/** |
2252 |
|
*/ |
2253 |
|
public long getQueuedTaskCount() { |
2254 |
|
long count = 0; |
2255 |
< |
ForkJoinWorkerThread[] ws; |
2256 |
< |
if ((short)(ctl >>> TC_SHIFT) > -parallelism && |
2257 |
< |
(ws = workers) != null) { |
2258 |
< |
for (ForkJoinWorkerThread w : ws) |
2259 |
< |
if (w != null) |
2260 |
< |
count -= w.queueBase - w.queueTop; // must read base first |
2255 |
> |
WorkQueue[] ws; WorkQueue w; |
2256 |
> |
if ((ws = workQueues) != null) { |
2257 |
> |
int n = ws.length; |
2258 |
> |
for (int i = 1; i < n; i += 2) { |
2259 |
> |
if ((w = ws[i]) != null) |
2260 |
> |
count += w.queueSize(); |
2261 |
> |
} |
2262 |
|
} |
2263 |
|
return count; |
2264 |
|
} |
2265 |
|
|
2266 |
|
/** |
2267 |
|
* Returns an estimate of the number of tasks submitted to this |
2268 |
< |
* pool that have not yet begun executing. This meThod may take |
2268 |
> |
* pool that have not yet begun executing. This method may take |
2269 |
|
* time proportional to the number of submissions. |
2270 |
|
* |
2271 |
|
* @return the number of queued submissions |
2272 |
|
*/ |
2273 |
|
public int getQueuedSubmissionCount() { |
2274 |
< |
return -queueBase + queueTop; |
2274 |
> |
int count = 0; |
2275 |
> |
WorkQueue[] ws; WorkQueue w; |
2276 |
> |
if ((ws = workQueues) != null) { |
2277 |
> |
int n = ws.length; |
2278 |
> |
for (int i = 0; i < n; i += 2) { |
2279 |
> |
if ((w = ws[i]) != null) |
2280 |
> |
count += w.queueSize(); |
2281 |
> |
} |
2282 |
> |
} |
2283 |
> |
return count; |
2284 |
|
} |
2285 |
|
|
2286 |
|
/** |
2290 |
|
* @return {@code true} if there are any queued submissions |
2291 |
|
*/ |
2292 |
|
public boolean hasQueuedSubmissions() { |
2293 |
< |
return queueBase != queueTop; |
2293 |
> |
WorkQueue[] ws; WorkQueue w; |
2294 |
> |
if ((ws = workQueues) != null) { |
2295 |
> |
int n = ws.length; |
2296 |
> |
for (int i = 0; i < n; i += 2) { |
2297 |
> |
if ((w = ws[i]) != null && w.queueSize() != 0) |
2298 |
> |
return true; |
2299 |
> |
} |
2300 |
> |
} |
2301 |
> |
return false; |
2302 |
|
} |
2303 |
|
|
2304 |
|
/** |
2309 |
|
* @return the next submission, or {@code null} if none |
2310 |
|
*/ |
2311 |
|
protected ForkJoinTask<?> pollSubmission() { |
2312 |
< |
ForkJoinTask<?> t; ForkJoinTask<?>[] q; int b, i; |
2313 |
< |
while ((b = queueBase) != queueTop && |
2314 |
< |
(q = submissionQueue) != null && |
2315 |
< |
(i = (q.length - 1) & b) >= 0) { |
2316 |
< |
long u = (i << ASHIFT) + ABASE; |
2317 |
< |
if ((t = q[i]) != null && |
1765 |
< |
queueBase == b && |
1766 |
< |
UNSAFE.compareAndSwapObject(q, u, t, null)) { |
1767 |
< |
queueBase = b + 1; |
1768 |
< |
return t; |
2312 |
> |
WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t; |
2313 |
> |
if ((ws = workQueues) != null) { |
2314 |
> |
int n = ws.length; |
2315 |
> |
for (int i = 0; i < n; i += 2) { |
2316 |
> |
if ((w = ws[i]) != null && (t = w.poll()) != null) |
2317 |
> |
return t; |
2318 |
|
} |
2319 |
|
} |
2320 |
|
return null; |
2339 |
|
*/ |
2340 |
|
protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) { |
2341 |
|
int count = 0; |
2342 |
< |
while (queueBase != queueTop) { |
2343 |
< |
ForkJoinTask<?> t = pollSubmission(); |
2344 |
< |
if (t != null) { |
2345 |
< |
c.add(t); |
2346 |
< |
++count; |
2342 |
> |
WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t; |
2343 |
> |
if ((ws = workQueues) != null) { |
2344 |
> |
int n = ws.length; |
2345 |
> |
for (int i = 0; i < n; ++i) { |
2346 |
> |
if ((w = ws[i]) != null) { |
2347 |
> |
while ((t = w.poll()) != null) { |
2348 |
> |
c.add(t); |
2349 |
> |
++count; |
2350 |
> |
} |
2351 |
> |
} |
2352 |
|
} |
2353 |
|
} |
1800 |
– |
ForkJoinWorkerThread[] ws; |
1801 |
– |
if ((short)(ctl >>> TC_SHIFT) > -parallelism && |
1802 |
– |
(ws = workers) != null) { |
1803 |
– |
for (ForkJoinWorkerThread w : ws) |
1804 |
– |
if (w != null) |
1805 |
– |
count += w.drainTasksTo(c); |
1806 |
– |
} |
2354 |
|
return count; |
2355 |
|
} |
2356 |
|
|
2365 |
|
long st = getStealCount(); |
2366 |
|
long qt = getQueuedTaskCount(); |
2367 |
|
long qs = getQueuedSubmissionCount(); |
2368 |
+ |
int rc = getRunningThreadCount(); |
2369 |
|
int pc = parallelism; |
2370 |
|
long c = ctl; |
2371 |
|
int tc = pc + (short)(c >>> TC_SHIFT); |
2372 |
< |
int rc = pc + (int)(c >> AC_SHIFT); |
2373 |
< |
if (rc < 0) // ignore transient negative |
2374 |
< |
rc = 0; |
1827 |
< |
int ac = rc + blockedCount; |
2372 |
> |
int ac = pc + (int)(c >> AC_SHIFT); |
2373 |
> |
if (ac < 0) // ignore transient negative |
2374 |
> |
ac = 0; |
2375 |
|
String level; |
2376 |
|
if ((c & STOP_BIT) != 0) |
2377 |
< |
level = (tc == 0)? "Terminated" : "Terminating"; |
2377 |
> |
level = (tc == 0) ? "Terminated" : "Terminating"; |
2378 |
|
else |
2379 |
< |
level = shutdown? "Shutting down" : "Running"; |
2379 |
> |
level = runState < 0 ? "Shutting down" : "Running"; |
2380 |
|
return super.toString() + |
2381 |
|
"[" + level + |
2382 |
|
", parallelism = " + pc + |
2403 |
|
*/ |
2404 |
|
public void shutdown() { |
2405 |
|
checkPermission(); |
2406 |
< |
shutdown = true; |
2406 |
> |
enableShutdown(); |
2407 |
|
tryTerminate(false); |
2408 |
|
} |
2409 |
|
|
2425 |
|
*/ |
2426 |
|
public List<Runnable> shutdownNow() { |
2427 |
|
checkPermission(); |
2428 |
< |
shutdown = true; |
2428 |
> |
enableShutdown(); |
2429 |
|
tryTerminate(true); |
2430 |
|
return Collections.emptyList(); |
2431 |
|
} |
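Both shutdown methods now just raise the SHUTDOWN bit and delegate the real decision to tryTerminate, so an idle pool winds down without interrupting anything. Typical client-side teardown, using only the exported methods:

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.TimeUnit;

    class Teardown {
        static void close(ForkJoinPool pool) throws InterruptedException {
            pool.shutdown();                           // refuse new submissions
            if (!pool.awaitTermination(10, TimeUnit.SECONDS))
                pool.shutdownNow();                    // cancel queued tasks
        }
    }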
2461 |
|
} |
2462 |
|
|
2463 |
|
/** |
1917 |
– |
* Returns true if terminating or terminated. Used by ForkJoinWorkerThread. |
1918 |
– |
*/ |
1919 |
– |
final boolean isAtLeastTerminating() { |
1920 |
– |
return (ctl & STOP_BIT) != 0L; |
1921 |
– |
} |
1922 |
– |
|
1923 |
– |
/** |
2464 |
|
* Returns {@code true} if this pool has been shut down. |
2465 |
|
* |
2466 |
|
* @return {@code true} if this pool has been shut down |
2467 |
|
*/ |
2468 |
|
public boolean isShutdown() { |
2469 |
< |
return shutdown; |
2469 |
> |
return runState < 0; |
2470 |
|
} |
2471 |
|
|
2472 |
|
/** |
2483 |
|
public boolean awaitTermination(long timeout, TimeUnit unit) |
2484 |
|
throws InterruptedException { |
2485 |
|
long nanos = unit.toNanos(timeout); |
2486 |
< |
final ReentrantLock lock = this.submissionLock; |
2486 |
> |
final ReentrantLock lock = this.lock; |
2487 |
|
lock.lock(); |
2488 |
|
try { |
2489 |
|
for (;;) { |
2506 |
|
* {@code isReleasable} must return {@code true} if blocking is |
2507 |
|
* not necessary. Method {@code block} blocks the current thread |
2508 |
|
* if necessary (perhaps internally invoking {@code isReleasable} |
2509 |
< |
* before actually blocking). The unusual methods in this API |
2510 |
< |
* accommodate synchronizers that may, but don't usually, block |
2511 |
< |
* for long periods. Similarly, they allow more efficient internal |
2512 |
< |
* handling of cases in which additional workers may be, but |
2513 |
< |
* usually are not, needed to ensure sufficient parallelism. |
2514 |
< |
* Toward this end, implementations of method {@code isReleasable} |
2515 |
< |
* must be amenable to repeated invocation. |
2509 |
> |
* before actually blocking). These actions are performed by any |
2510 |
> |
* thread invoking {@link ForkJoinPool#managedBlock}. The |
2511 |
> |
* unusual methods in this API accommodate synchronizers that may, |
2512 |
> |
* but don't usually, block for long periods. Similarly, they |
2513 |
> |
* allow more efficient internal handling of cases in which |
2514 |
> |
* additional workers may be, but usually are not, needed to |
2515 |
> |
* ensure sufficient parallelism. Toward this end, |
2516 |
> |
* implementations of method {@code isReleasable} must be amenable |
2517 |
> |
* to repeated invocation. |
2518 |
|
* |
2519 |
|
* <p>For example, here is a ManagedBlocker based on a |
2520 |
|
* ReentrantLock: |
2594 |
|
public static void managedBlock(ManagedBlocker blocker) |
2595 |
|
throws InterruptedException { |
2596 |
|
Thread t = Thread.currentThread(); |
2597 |
< |
if (t instanceof ForkJoinWorkerThread) { |
2598 |
< |
ForkJoinWorkerThread w = (ForkJoinWorkerThread) t; |
2599 |
< |
w.pool.awaitBlocker(blocker); |
2600 |
< |
} |
2601 |
< |
else { |
2602 |
< |
do {} while (!blocker.isReleasable() && !blocker.block()); |
2597 |
> |
ForkJoinPool p = ((t instanceof ForkJoinWorkerThread) ? |
2598 |
> |
((ForkJoinWorkerThread)t).pool : null); |
2599 |
> |
while (!blocker.isReleasable()) { |
2600 |
> |
if (p == null || p.tryCompensate()) { |
2601 |
> |
try { |
2602 |
> |
do {} while (!blocker.isReleasable() && !blocker.block()); |
2603 |
> |
} finally { |
2604 |
> |
if (p != null) |
2605 |
> |
p.incrementActiveCount(); |
2606 |
> |
} |
2607 |
> |
break; |
2608 |
> |
} |
2609 |
|
} |
2610 |
|
} |
2611 |
|
|
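The reworked managedBlock compensates before blocking (tryCompensate) and restores the active count afterward, while non-pool threads simply spin on isReleasable and block. A small ManagedBlocker that waits for a flag set by another thread, assuming only the interface and the managedBlock signature shown here:

    import java.util.concurrent.ForkJoinPool;

    class FlagBlocker implements ForkJoinPool.ManagedBlocker {
        volatile boolean ready;
        public boolean isReleasable() { return ready; }
        public synchronized boolean block() throws InterruptedException {
            while (!ready)
                wait();                  // pool may add a spare worker meanwhile
            return true;
        }
        public synchronized void trigger() { ready = true; notifyAll(); }
    }
    // In task code: ForkJoinPool.managedBlock(blocker);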
2622 |
|
} |
2623 |
|
|
2624 |
|
// Unsafe mechanics |
2625 |
< |
private static final sun.misc.Unsafe UNSAFE; |
2626 |
< |
private static final long ctlOffset; |
2627 |
< |
private static final long stealCountOffset; |
2628 |
< |
private static final long blockedCountOffset; |
2081 |
< |
private static final long quiescerCountOffset; |
2082 |
< |
private static final long scanGuardOffset; |
2083 |
< |
private static final long nextWorkerNumberOffset; |
2084 |
< |
private static final long ABASE; |
2085 |
< |
private static final int ASHIFT; |
2625 |
> |
private static final sun.misc.Unsafe U; |
2626 |
> |
private static final long CTL; |
2627 |
> |
private static final long RUNSTATE; |
2628 |
> |
private static final long PARKBLOCKER; |
2629 |
|
|
2630 |
|
static { |
2631 |
|
poolNumberGenerator = new AtomicInteger(); |
2089 |
– |
workerSeedGenerator = new Random(); |
2632 |
|
modifyThreadPermission = new RuntimePermission("modifyThread"); |
2633 |
|
defaultForkJoinWorkerThreadFactory = |
2634 |
|
new DefaultForkJoinWorkerThreadFactory(); |
2635 |
|
int s; |
2636 |
|
try { |
2637 |
< |
UNSAFE = getUnsafe(); |
2638 |
< |
Class k = ForkJoinPool.class; |
2639 |
< |
ctlOffset = UNSAFE.objectFieldOffset |
2637 |
> |
U = getUnsafe(); |
2638 |
> |
Class<?> k = ForkJoinPool.class; |
2639 |
> |
Class<?> tk = Thread.class; |
2640 |
> |
CTL = U.objectFieldOffset |
2641 |
|
(k.getDeclaredField("ctl")); |
2642 |
< |
stealCountOffset = UNSAFE.objectFieldOffset |
2643 |
< |
(k.getDeclaredField("stealCount")); |
2644 |
< |
blockedCountOffset = UNSAFE.objectFieldOffset |
2645 |
< |
(k.getDeclaredField("blockedCount")); |
2103 |
< |
quiescerCountOffset = UNSAFE.objectFieldOffset |
2104 |
< |
(k.getDeclaredField("quiescerCount")); |
2105 |
< |
scanGuardOffset = UNSAFE.objectFieldOffset |
2106 |
< |
(k.getDeclaredField("scanGuard")); |
2107 |
< |
nextWorkerNumberOffset = UNSAFE.objectFieldOffset |
2108 |
< |
(k.getDeclaredField("nextWorkerNumber")); |
2109 |
< |
Class a = ForkJoinTask[].class; |
2110 |
< |
ABASE = UNSAFE.arrayBaseOffset(a); |
2111 |
< |
s = UNSAFE.arrayIndexScale(a); |
2642 |
> |
RUNSTATE = U.objectFieldOffset |
2643 |
> |
(k.getDeclaredField("runState")); |
2644 |
> |
PARKBLOCKER = U.objectFieldOffset |
2645 |
> |
(tk.getDeclaredField("parkBlocker")); |
2646 |
|
} catch (Exception e) { |
2647 |
|
throw new Error(e); |
2648 |
|
} |
2115 |
– |
if ((s & (s-1)) != 0) |
2116 |
– |
throw new Error("data type scale not a power of two"); |
2117 |
– |
ASHIFT = 31 - Integer.numberOfLeadingZeros(s); |
2649 |
|
} |
2650 |
|
|
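The static block resolves field offsets once so hot paths can CAS through Unsafe without per-call reflection. Outside JDK-internal code the supported analogue is an atomic field updater; the same ctl-style CAS with that portable API:

    import java.util.concurrent.atomic.AtomicLongFieldUpdater;

    class CtlHolder {
        static final AtomicLongFieldUpdater<CtlHolder> CTL =
            AtomicLongFieldUpdater.newUpdater(CtlHolder.class, "ctl");
        volatile long ctl;

        boolean casCtl(long expect, long update) {
            return CTL.compareAndSet(this, expect, update);
        }
    }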
2651 |
|
/** |
2675 |
|
} |
2676 |
|
} |
2677 |
|
} |
2678 |
+ |
|
2679 |
|
} |