1 |
|
/* |
2 |
|
* Written by Doug Lea with assistance from members of JCP JSR-166 |
3 |
|
* Expert Group and released to the public domain, as explained at |
4 |
< |
* http://creativecommons.org/licenses/publicdomain |
4 |
> |
* http://creativecommons.org/publicdomain/zero/1.0/ |
5 |
|
*/ |
6 |
|
|
7 |
|
package jsr166y; |
8 |
|
|
9 |
<
import java.util.concurrent.*; |
10 |
<
|
9 |
|
import java.util.ArrayList; |
10 |
|
import java.util.Arrays; |
11 |
|
import java.util.Collection; |
12 |
|
import java.util.Collections; |
13 |
|
import java.util.List; |
14 |
< |
import java.util.concurrent.locks.LockSupport; |
15 |
< |
import java.util.concurrent.locks.ReentrantLock; |
14 |
> |
import java.util.Random; |
15 |
> |
import java.util.concurrent.AbstractExecutorService; |
16 |
> |
import java.util.concurrent.Callable; |
17 |
> |
import java.util.concurrent.ExecutorService; |
18 |
> |
import java.util.concurrent.Future; |
19 |
> |
import java.util.concurrent.RejectedExecutionException; |
20 |
> |
import java.util.concurrent.RunnableFuture; |
21 |
> |
import java.util.concurrent.TimeUnit; |
22 |
|
import java.util.concurrent.atomic.AtomicInteger; |
23 |
< |
import java.util.concurrent.CountDownLatch; |
23 |
> |
import java.util.concurrent.atomic.AtomicLong; |
24 |
> |
import java.util.concurrent.locks.ReentrantLock; |
25 |
> |
import java.util.concurrent.locks.Condition; |
26 |
|
|
27 |
|
/** |
28 |
|
* An {@link ExecutorService} for running {@link ForkJoinTask}s. |
33 |
|
* <p>A {@code ForkJoinPool} differs from other kinds of {@link |
34 |
|
* ExecutorService} mainly by virtue of employing |
35 |
|
* <em>work-stealing</em>: all threads in the pool attempt to find and |
36 |
< |
* execute subtasks created by other active tasks (eventually blocking |
37 |
< |
* waiting for work if none exist). This enables efficient processing |
38 |
< |
* when most tasks spawn other subtasks (as do most {@code |
39 |
< |
* ForkJoinTask}s). When setting <em>asyncMode</em> to true in |
40 |
< |
* constructors, {@code ForkJoinPool}s may also be appropriate for use |
41 |
< |
* with event-style tasks that are never joined. |
36 |
> |
* execute tasks submitted to the pool and/or created by other active |
37 |
> |
* tasks (eventually blocking waiting for work if none exist). This |
38 |
> |
* enables efficient processing when most tasks spawn other subtasks |
39 |
> |
* (as do most {@code ForkJoinTask}s), as well as when many small |
40 |
> |
* tasks are submitted to the pool from external clients. Especially |
41 |
> |
* when setting <em>asyncMode</em> to true in constructors, {@code |
42 |
> |
* ForkJoinPool}s may also be appropriate for use with event-style |
43 |
> |
* tasks that are never joined. |
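*
* <p>For example, a pool intended for event-style tasks might be
* constructed as follows (a minimal sketch; the parallelism level
* and {@code null} handler shown here are illustrative choices):
*
* <pre> {@code
* ForkJoinPool eventPool = new ForkJoinPool(
*     Runtime.getRuntime().availableProcessors(),
*     ForkJoinPool.defaultForkJoinWorkerThreadFactory,
*     null,    // no custom UncaughtExceptionHandler
*     true);   // asyncMode: local FIFO processing of unjoined tasks
* }</pre>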
44 |
|
* |
45 |
|
* <p>A {@code ForkJoinPool} is constructed with a given target |
46 |
|
* parallelism level; by default, equal to the number of available |
61 |
|
* |
62 |
|
* <p> As is the case with other ExecutorServices, there are three |
63 |
|
* main task execution methods summarized in the following |
64 |
< |
* table. These are designed to be used by clients not already engaged |
65 |
< |
* in fork/join computations in the current pool. The main forms of |
66 |
< |
* these methods accept instances of {@code ForkJoinTask}, but |
67 |
< |
* overloaded forms also allow mixed execution of plain {@code |
68 |
< |
* Runnable}- or {@code Callable}- based activities as well. However, |
69 |
< |
* tasks that are already executing in a pool should normally |
70 |
< |
* <em>NOT</em> use these pool execution methods, but instead use the |
71 |
< |
* within-computation forms listed in the table. |
64 |
> |
* table. These are designed to be used primarily by clients not |
65 |
> |
* already engaged in fork/join computations in the current pool. The |
66 |
> |
* main forms of these methods accept instances of {@code |
67 |
> |
* ForkJoinTask}, but overloaded forms also allow mixed execution of |
68 |
> |
* plain {@code Runnable}- or {@code Callable}- based activities as |
69 |
> |
* well. However, tasks that are already executing in a pool should |
70 |
> |
* normally instead use the within-computation forms listed in the |
71 |
> |
* table unless using async event-style tasks that are not usually |
72 |
> |
* joined, in which case there is little difference among choice of |
73 |
> |
* methods. |
74 |
|
* |
75 |
|
* <table BORDER CELLPADDING=3 CELLSPACING=1> |
76 |
|
* <tr> |
79 |
|
* <td ALIGN=CENTER> <b>Call from within fork/join computations</b></td> |
80 |
|
* </tr> |
81 |
|
* <tr> |
82 |
< |
* <td> <b>Arange async execution</td> |
82 |
> |
* <td> <b>Arrange async execution</td> |
83 |
|
* <td> {@link #execute(ForkJoinTask)}</td> |
84 |
|
* <td> {@link ForkJoinTask#fork}</td> |
85 |
|
* </tr> |
105 |
|
* daemon} mode, there is typically no need to explicitly {@link |
106 |
|
* #shutdown} such a pool upon program exit. |
107 |
|
* |
108 |
< |
* <pre> |
108 |
> |
* <pre> {@code |
109 |
|
* static final ForkJoinPool mainPool = new ForkJoinPool(); |
110 |
|
* ... |
111 |
|
* public void sort(long[] array) { |
112 |
|
* mainPool.invoke(new SortTask(array, 0, array.length)); |
113 |
< |
* } |
104 |
< |
* </pre> |
113 |
> |
* }}</pre> |
114 |
|
* |
115 |
|
* <p><b>Implementation notes</b>: This implementation restricts the |
116 |
|
* maximum number of running threads to 32767. Attempts to create |
129 |
|
/* |
130 |
|
* Implementation Overview |
131 |
|
* |
132 |
< |
* This class provides the central bookkeeping and control for a |
133 |
< |
* set of worker threads: Submissions from non-FJ threads enter |
134 |
< |
* into a submission queue. Workers take these tasks and typically |
135 |
< |
* split them into subtasks that may be stolen by other workers. |
136 |
< |
* The main work-stealing mechanics implemented in class |
137 |
< |
* ForkJoinWorkerThread give first priority to processing tasks |
138 |
< |
* from their own queues (LIFO or FIFO, depending on mode), then |
139 |
< |
* to randomized FIFO steals of tasks in other worker queues, and |
140 |
< |
* lastly to new submissions. These mechanics do not consider |
141 |
< |
* affinities, loads, cache localities, etc, so rarely provide the |
142 |
< |
* best possible performance on a given machine, but portably |
143 |
< |
* provide good throughput by averaging over these factors. |
144 |
< |
* (Further, even if we did try to use such information, we do not |
145 |
< |
* usually have a basis for exploiting it. For example, some sets |
146 |
< |
* of tasks profit from cache affinities, but others are harmed by |
147 |
< |
* cache pollution effects.) |
148 |
< |
* |
149 |
< |
* Beyond work-stealing support and essential bookkeeping, the |
150 |
< |
* main responsibility of this framework is to take actions when |
151 |
< |
* one worker is waiting to join a task stolen (or always held by) |
152 |
< |
* another. Because we are multiplexing many tasks on to a pool
153 |
< |
* of workers, we can't just let them block (as in Thread.join). |
154 |
< |
* We also cannot just reassign the joiner's run-time stack with |
155 |
< |
* another and replace it later, which would be a form of |
156 |
< |
* "continuation", that even if possible is not necessarily a good |
157 |
< |
* idea. Given that the creation costs of most threads on most |
158 |
< |
* systems mainly surrounds setting up runtime stacks, thread |
159 |
< |
* creation and switching is usually not much more expensive than |
160 |
< |
* stack creation and switching, and is more flexible). Instead we |
161 |
< |
* combine two tactics: |
132 |
> |
* This class and its nested classes provide the main |
133 |
> |
* functionality and control for a set of worker threads: |
134 |
> |
* Submissions from non-FJ threads enter into submission |
135 |
> |
* queues. Workers take these tasks and typically split them into |
136 |
> |
* subtasks that may be stolen by other workers. Preference rules |
137 |
> |
* give first priority to processing tasks from their own queues |
138 |
> |
* (LIFO or FIFO, depending on mode), then to randomized FIFO |
139 |
> |
* steals of tasks in other queues. |
140 |
> |
* |
141 |
> |
* WorkQueues. |
142 |
> |
* ========== |
143 |
> |
* |
144 |
> |
* Most operations occur within work-stealing queues (in nested |
145 |
> |
* class WorkQueue). These are special forms of Deques that |
146 |
> |
* support only three of the four possible end-operations -- push, |
147 |
> |
* pop, and poll (aka steal), under the further constraints that |
148 |
> |
* push and pop are called only from the owning thread (or, as |
149 |
> |
* extended here, under a lock), while poll may be called from |
150 |
> |
* other threads. (If you are unfamiliar with them, you probably |
151 |
> |
* want to read Herlihy and Shavit's book "The Art of |
152 |
> |
* Multiprocessor Programming", chapter 16 describing these in
153 |
> |
* more detail before proceeding.) The main work-stealing queue |
154 |
> |
* design is roughly similar to those in the papers "Dynamic |
155 |
> |
* Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005 |
156 |
> |
* (http://research.sun.com/scalable/pubs/index.html) and |
157 |
> |
* "Idempotent work stealing" by Michael, Saraswat, and Vechev, |
158 |
> |
* PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186). |
159 |
> |
* The main differences ultimately stem from gc requirements that |
160 |
> |
* we null out taken slots as soon as we can, to maintain as small |
161 |
> |
* a footprint as possible even in programs generating huge |
162 |
> |
* numbers of tasks. To accomplish this, we shift the CAS |
163 |
> |
* arbitrating pop vs poll (steal) from being on the indices |
164 |
> |
* ("base" and "top") to the slots themselves. So, both a |
165 |
> |
* successful pop and poll mainly entail a CAS of a slot from |
166 |
> |
* non-null to null. Because we rely on CASes of references, we |
167 |
> |
* do not need tag bits on base or top. They are simple ints as |
168 |
> |
* used in any circular array-based queue (see for example |
169 |
> |
* ArrayDeque). Updates to the indices must still be ordered in a |
170 |
> |
* way that guarantees that top == base means the queue is empty, |
171 |
> |
* but otherwise may err on the side of possibly making the queue |
172 |
> |
* appear nonempty when a push, pop, or poll have not fully |
173 |
> |
* committed. Note that this means that the poll operation, |
174 |
> |
* considered individually, is not wait-free. One thief cannot |
175 |
> |
* successfully continue until another in-progress one (or, if |
176 |
> |
* previously empty, a push) completes. However, in the |
177 |
> |
* aggregate, we ensure at least probabilistic non-blockingness. |
178 |
> |
* If an attempted steal fails, a thief always chooses a different |
179 |
> |
* random victim target to try next. So, in order for one thief to |
180 |
> |
* progress, it suffices for any in-progress poll or new push on |
181 |
> |
* any empty queue to complete. |
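*
* A minimal sketch of this slot-CAS arbitration (hypothetical and
* simplified: fixed capacity, no growth, overflow checks, or
* signalling), using java.util.concurrent.atomic.AtomicReferenceArray:
*
*   class SlotQueue {
*       final AtomicReferenceArray<Runnable> a =
*           new AtomicReferenceArray<Runnable>(1 << 8);
*       volatile int base;                     // next slot to poll
*       int top;                               // next slot to push
*       void push(Runnable t) {                // owner only
*           a.set(top++ & (a.length() - 1), t);
*       }
*       Runnable pop() {                       // owner only
*           int s = top - 1, i = s & (a.length() - 1);
*           Runnable t = a.get(i);
*           if (t != null && a.compareAndSet(i, t, null)) {
*               top = s;                       // won the slot CAS
*               return t;
*           }
*           return null;                       // lost to a thief
*       }
*       Runnable poll() {                      // any thief
*           int b = base, i = b & (a.length() - 1);
*           Runnable t = a.get(i);
*           if (t != null && b == base && a.compareAndSet(i, t, null)) {
*               base = b + 1;                  // index update after CAS
*               return t;
*           }
*           return null;                       // lost race, or empty
*       }
*   }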
182 |
> |
* |
183 |
> |
* This approach also enables support of a user mode in which local |
184 |
> |
* task processing is in FIFO, not LIFO order, simply by using |
185 |
> |
* poll rather than pop. This can be useful in message-passing |
186 |
> |
* frameworks in which tasks are never joined. However, neither
187 |
> |
* mode considers affinities, loads, cache localities, etc, so |
188 |
> |
* rarely provides the best possible performance on a given
189 |
> |
* machine, but portably provides good throughput by averaging over
190 |
> |
* these factors. (Further, even if we did try to use such |
191 |
> |
* information, we do not usually have a basis for exploiting |
192 |
> |
* it. For example, some sets of tasks profit from cache |
193 |
> |
* affinities, but others are harmed by cache pollution effects.) |
194 |
> |
* |
195 |
> |
* WorkQueues are also used in a similar way for tasks submitted |
196 |
> |
* to the pool. We cannot mix these tasks in the same queues used |
197 |
> |
* for work-stealing (this would contaminate lifo/fifo |
198 |
> |
* processing). Instead, we loosely associate (via hashing) |
199 |
> |
* submission queues with submitting threads, and randomly scan |
200 |
> |
* these queues as well when looking for work. In essence, |
201 |
> |
* submitters act like workers except that they never take tasks, |
202 |
> |
* and they are multiplexed on to a finite number of shared work |
203 |
> |
* queues. However, classes are set up so that future extensions |
204 |
> |
* could allow submitters to optionally help perform tasks as |
205 |
> |
* well. Pool submissions from internal workers are also allowed, |
206 |
> |
* but use randomized rather than thread-hashed queue indices to |
207 |
> |
* avoid imbalance. Insertion of tasks in shared mode requires a |
208 |
> |
* lock (mainly to protect in the case of resizing) but we use |
209 |
> |
* only a simple spinlock (using bits in field runState), because |
210 |
> |
* submitters encountering a busy queue try other queues or
211 |
> |
* create new ones, and so never block.
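*
* In outline (a hypothetical sketch; submitterHash and mask stand
* in for whatever per-thread hash and index mask are actually used):
*
*   int h = submitterHash(Thread.currentThread());
*   WorkQueue q = workQueues[h & mask & ~1];  // even index: shared queue
*   if (!q.trySharedPush(task))
*       // queue busy or full: rehash to try another, or register
*       // a new submission queue, rather than blocking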
212 |
> |
* |
213 |
> |
* Management. |
214 |
> |
* ========== |
215 |
> |
* |
216 |
> |
* The main throughput advantages of work-stealing stem from |
217 |
> |
* decentralized control -- workers mostly take tasks from |
218 |
> |
* themselves or each other. We cannot negate this in the |
219 |
> |
* implementation of other management responsibilities. The main |
220 |
> |
* tactic for avoiding bottlenecks is packing nearly all |
221 |
> |
* essentially atomic control state into two volatile variables |
222 |
> |
* that are by far most often read (not written) as status and |
223 |
> |
* consistency checks.
224 |
> |
* |
225 |
> |
* Field "ctl" contains 64 bits holding all the information needed |
226 |
> |
* to atomically decide to add, inactivate, enqueue (on an event |
227 |
> |
* queue), dequeue, and/or re-activate workers. To enable this |
228 |
> |
* packing, we restrict maximum parallelism to (1<<15)-1 (which is |
229 |
> |
* far in excess of normal operating range) to allow ids, counts, |
230 |
> |
* and their negations (used for thresholding) to fit into 16bit |
231 |
> |
* fields. |
232 |
> |
* |
233 |
> |
* Field "runState" contains 32 bits needed to register and |
234 |
> |
* deregister WorkQueues, as well as to enable shutdown. It is |
235 |
> |
* only modified under a lock (normally briefly held, but |
236 |
> |
* occasionally protecting allocations and resizings) but even |
237 |
> |
* when locked remains available to check consistency. |
238 |
> |
* |
239 |
> |
* Recording WorkQueues. WorkQueues are recorded in the |
240 |
> |
* "workQueues" array that is created upon pool construction and |
241 |
> |
* expanded if necessary. Updates to the array while recording |
242 |
> |
* new workers and unrecording terminated ones are protected from |
243 |
> |
* each other by a lock but the array is otherwise concurrently |
244 |
> |
* readable, and accessed directly. To simplify index-based |
245 |
> |
* operations, the array size is always a power of two, and all |
246 |
> |
* readers must tolerate null slots. Shared (submission) queues |
247 |
> |
* are at even indices, worker queues at odd indices. Grouping |
248 |
> |
* them together in this way simplifies and speeds up task |
249 |
> |
* scanning. To avoid flailing during start-up, the array is |
250 |
> |
* presized to hold twice #parallelism workers (which is unlikely |
251 |
> |
* to need further resizing during execution). But to avoid |
252 |
> |
* dealing with so many null slots, variable runState includes a |
253 |
> |
* mask for the nearest power of two that contains all current |
254 |
> |
* workers. All worker thread creation is on-demand, triggered by |
255 |
> |
* task submissions, replacement of terminated workers, and/or |
256 |
> |
* compensation for blocked workers. However, all other support |
257 |
> |
* code is set up to work with other policies. To ensure that we |
258 |
> |
* do not hold on to worker references that would prevent GC, ALL |
259 |
> |
* accesses to workQueues are via indices into the workQueues |
260 |
> |
* array (which is one source of some of the messy code |
261 |
> |
* constructions here). In essence, the workQueues array serves as |
262 |
> |
* a weak reference mechanism. Thus for example the wait queue |
263 |
> |
* field of ctl stores indices, not references. Access to the |
264 |
> |
* workQueues in associated methods (for example signalWork) must |
265 |
> |
* both index-check and null-check the IDs. All such accesses |
266 |
> |
* ignore bad IDs by returning out early from what they are doing, |
267 |
> |
* since this can only be associated with termination, in which |
268 |
> |
* case it is OK to give up. |
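*
* The resulting guarded access pattern looks like (sketch):
*
*   WorkQueue[] ws; WorkQueue w;               // always re-read
*   if ((ws = workQueues) != null && k < ws.length &&
*       (w = ws[k]) != null) {
*       // ... use w ...
*   }                                          // else: stale id; give up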
269 |
> |
* |
270 |
> |
* All uses of the workQueues array check that it is non-null |
271 |
> |
* (even if previously non-null). This allows nulling during |
272 |
> |
* termination, which is currently not necessary, but remains an |
273 |
> |
* option for resource-revocation-based shutdown schemes. It also |
274 |
> |
* helps reduce JIT issuance of uncommon-trap code, which tends to |
275 |
> |
* unnecessarily complicate control flow in some methods. |
276 |
> |
* |
277 |
> |
* Event Queuing. Unlike HPC work-stealing frameworks, we cannot |
278 |
> |
* let workers spin indefinitely scanning for tasks when none can |
279 |
> |
* be found immediately, and we cannot start/resume workers unless |
280 |
> |
* there appear to be tasks available. On the other hand, we must |
281 |
> |
* quickly prod them into action when new tasks are submitted or |
282 |
> |
* generated. In many usages, ramp-up time to activate workers is |
283 |
> |
* the main limiting factor in overall performance (this is |
284 |
> |
* compounded at program start-up by JIT compilation and |
285 |
> |
* allocation). So we try to streamline this as much as possible. |
286 |
> |
* We park/unpark workers after placing in an event wait queue |
287 |
> |
* when they cannot find work. This "queue" is actually a simple |
288 |
> |
* Treiber stack, headed by the "id" field of ctl, plus a 15bit |
289 |
> |
* counter value (that reflects the number of times a worker has |
290 |
> |
* been inactivated) to avoid ABA effects (we need only as many |
291 |
> |
* version numbers as worker threads). Successors are held in |
292 |
> |
* field WorkQueue.nextWait. Queuing deals with several intrinsic |
293 |
> |
* races, mainly that a task-producing thread can miss seeing (and |
294 |
> |
* signalling) another thread that gave up looking for work but |
295 |
> |
* has not yet entered the wait queue. We solve this by requiring |
296 |
> |
* a full sweep of all workers (via repeated calls to method |
297 |
> |
* scan()) both before and after a newly waiting worker is added |
298 |
> |
* to the wait queue. During a rescan, the worker might release |
299 |
> |
* some other queued worker rather than itself, which has the same |
300 |
> |
* net effect. Because enqueued workers may actually be rescanning |
301 |
> |
* rather than waiting, we set and clear the "parker" field of |
302 |
> |
* WorkQueues to reduce unnecessary calls to unpark. (This
303 |
> |
* requires a secondary recheck to avoid missed signals.) Note |
304 |
> |
* the unusual conventions about Thread.interrupts surrounding |
305 |
> |
* parking and other blocking: Because interrupts are used solely |
306 |
> |
* to alert threads to check termination, which is checked anyway |
307 |
> |
* upon blocking, we clear status (using Thread.interrupted) |
308 |
> |
* before any call to park, so that park does not immediately |
309 |
> |
* return due to status being set via some other unrelated call to |
310 |
> |
* interrupt in user code. |
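*
* In code form, the parking convention reduces to (sketch):
*
*   Thread.interrupted();            // clear status; termination is
*                                    // rechecked around blocking anyway
*   LockSupport.park(this);          // now cannot return immediately
*                                    // due to an unrelated interrupt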
311 |
> |
* |
312 |
> |
* Signalling. We create or wake up workers only when there |
313 |
> |
* appears to be at least one task they might be able to find and |
314 |
> |
* execute. When a submission is added or another worker adds a |
315 |
> |
* task to a queue that previously had fewer than two tasks, they |
316 |
> |
* signal waiting workers (or trigger creation of new ones if |
317 |
> |
* fewer than the given parallelism level -- see signalWork). |
318 |
> |
* These primary signals are buttressed by signals during rescans; |
319 |
> |
* together these cover the signals needed in cases when more |
320 |
> |
* tasks are pushed but untaken, and improve performance compared |
321 |
> |
* to having one thread wake up all workers. |
322 |
> |
* |
323 |
> |
* Trimming workers. To release resources after periods of lack of |
324 |
> |
* use, a worker starting to wait when the pool is quiescent will |
325 |
> |
* time out and terminate if the pool has remained quiescent for |
326 |
> |
* SHRINK_RATE nanosecs. This will slowly propagate, eventually |
327 |
> |
* terminating all workers after long periods of non-use. |
328 |
> |
* |
329 |
> |
* Shutdown and Termination. A call to shutdownNow atomically sets |
330 |
> |
* a runState bit and then (non-atomically) sets each worker's
331 |
> |
* runState status, cancels all unprocessed tasks, and wakes up |
332 |
> |
* all waiting workers. Detecting whether termination should |
333 |
> |
* commence after a non-abrupt shutdown() call requires more work |
334 |
> |
* and bookkeeping. We need consensus about quiescence (i.e., that |
335 |
> |
* there is no more work). The active count provides a primary |
336 |
> |
* indication but non-abrupt shutdown still requires a rechecking |
337 |
> |
* scan for any workers that are inactive but not queued. |
338 |
> |
* |
339 |
> |
* Joining Tasks. |
340 |
> |
* ============== |
341 |
> |
* |
342 |
> |
* Any of several actions may be taken when one worker is waiting |
343 |
> |
* to join a task stolen (or always held by) another. Because we |
344 |
> |
* are multiplexing many tasks on to a pool of workers, we can't |
345 |
> |
* just let them block (as in Thread.join). We also cannot just |
346 |
> |
* reassign the joiner's run-time stack with another and replace |
347 |
> |
* it later, which would be a form of "continuation" that, even if
348 |
> |
* possible, is not necessarily a good idea since we sometimes need
349 |
> |
* both an unblocked task and its continuation to |
350 |
> |
* progress. Instead we combine two tactics: |
351 |
|
* |
352 |
|
* Helping: Arranging for the joiner to execute some task that it |
353 |
< |
* would be running if the steal had not occurred. Method |
156 |
< |
* ForkJoinWorkerThread.helpJoinTask tracks joining->stealing |
157 |
< |
* links to try to find such a task. |
353 |
> |
* would be running if the steal had not occurred. |
354 |
|
* |
355 |
|
* Compensating: Unless there are already enough live threads, |
356 |
< |
* method helpMaintainParallelism() may create or
357 |
< |
* re-activate a spare thread to compensate for blocked |
162 |
< |
* joiners until they unblock. |
356 |
> |
* method tryCompensate() may create or re-activate a spare |
357 |
> |
* thread to compensate for blocked joiners until they unblock. |
358 |
|
* |
359 |
< |
* It is impossible to keep exactly the target (parallelism) |
360 |
< |
* number of threads running at any given time. Determining |
361 |
< |
* existence of conservatively safe helping targets, the |
362 |
< |
* availability of already-created spares, and the apparent need |
363 |
< |
* to create new spares are all racy and require heuristic |
364 |
< |
* guidance, so we rely on multiple retries of each. Compensation |
365 |
< |
* occurs in slow-motion. It is triggered only upon timeouts of |
171 |
< |
* Object.wait used for joins. This reduces poor decisions that |
172 |
< |
* would otherwise be made when threads are waiting for others |
173 |
< |
* that are stalled because of unrelated activities such as |
174 |
< |
* garbage collection. |
359 |
> |
* A third form (implemented in tryRemoveAndExec and |
360 |
> |
* tryPollForAndExec) amounts to helping a hypothetical |
361 |
> |
* compensator: If we can readily tell that a possible action of a |
362 |
> |
* compensator is to steal and execute the task being joined, the |
363 |
> |
* joining thread can do so directly, without the need for a |
364 |
> |
* compensation thread (although at the expense of larger run-time |
365 |
> |
* stacks, but the tradeoff is typically worthwhile). |
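*
* Schematically (hypothetical names; tryRemove stands in for the
* in-place removal that tryRemoveAndExec performs):
*
*   if (myQueue.tryRemove(joinMe))   // task still within our reach
*       joinMe.quietlyInvoke();      // run it here; no compensator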
366 |
|
* |
367 |
|
* The ManagedBlocker extension API can't use helping so relies |
368 |
|
* only on compensation in method awaitBlocker. |
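*
* For reference, a typical compensation client (a minimal sketch of
* a ManagedBlocker wrapping lock acquisition):
*
*   class LockBlocker implements ForkJoinPool.ManagedBlocker {
*       final ReentrantLock lock;
*       boolean held;
*       LockBlocker(ReentrantLock lock) { this.lock = lock; }
*       public boolean block() {
*           if (!held) lock.lock();
*           return held = true;      // no further blocking needed
*       }
*       public boolean isReleasable() {
*           return held || (held = lock.tryLock());
*       }
*   }
*   // ForkJoinPool.managedBlock(new LockBlocker(lock)) then invokes
*   // block(), compensating as described above while it waits.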
369 |
|
* |
370 |
< |
* The main throughput advantages of work-stealing stem from |
371 |
< |
* decentralized control -- workers mostly steal tasks from each |
372 |
< |
* other. We do not want to negate this by creating bottlenecks |
373 |
< |
* implementing other management responsibilities. So we use a |
374 |
< |
* collection of techniques that avoid, reduce, or cope well with |
375 |
< |
* contention. These entail several instances of bit-packing into |
376 |
< |
* CASable fields to maintain only the minimally required |
377 |
< |
* atomicity. To enable such packing, we restrict maximum |
378 |
< |
* parallelism to (1<<15)-1 (enabling twice this (to accommodate |
379 |
< |
* unbalanced increments and decrements) to fit into a 16 bit |
380 |
< |
* field, which is far in excess of normal operating range. Even |
381 |
< |
* though updates to some of these bookkeeping fields do sometimes |
382 |
< |
* contend with each other, they don't normally cache-contend with |
383 |
< |
* updates to others enough to warrant memory padding or |
384 |
< |
* isolation. So they are all held as fields of ForkJoinPool |
385 |
< |
* objects. The main capabilities are as follows: |
386 |
< |
* |
387 |
< |
* 1. Creating and removing workers. Workers are recorded in the |
388 |
< |
* "workers" array. This is an array as opposed to some other data |
389 |
< |
* structure to support index-based random steals by workers. |
390 |
< |
* Updates to the array recording new workers and unrecording |
391 |
< |
* terminated ones are protected from each other by a lock |
392 |
< |
* (workerLock) but the array is otherwise concurrently readable, |
393 |
< |
* and accessed directly by workers. To simplify index-based |
394 |
< |
* operations, the array size is always a power of two, and all |
395 |
< |
* readers must tolerate null slots. Currently, all worker thread |
396 |
< |
* creation is on-demand, triggered by task submissions, |
397 |
< |
* replacement of terminated workers, and/or compensation for |
398 |
< |
* blocked workers. However, all other support code is set up to |
399 |
< |
* work with other policies. |
400 |
< |
* |
401 |
< |
* To ensure that we do not hold on to worker references that |
402 |
< |
* would prevent GC, ALL accesses to workers are via indices into |
403 |
< |
* the workers array (which is one source of some of the unusual |
404 |
< |
* code constructions here). In essence, the workers array serves |
405 |
< |
* as a WeakReference mechanism. Thus for example the event queue |
406 |
< |
* stores worker indices, not worker references. Access to the |
407 |
< |
* workers in associated methods (for example releaseEventWaiters) |
408 |
< |
* must both index-check and null-check the IDs. All such accesses |
409 |
< |
* ignore bad IDs by returning out early from what they are doing, |
410 |
< |
* since this can only be associated with shutdown, in which case |
411 |
< |
* it is OK to give up. On termination, we just clobber these |
412 |
< |
* data structures without trying to use them. |
222 |
< |
* |
223 |
< |
* 2. Bookkeeping for dynamically adding and removing workers. We |
224 |
< |
* aim to approximately maintain the given level of parallelism. |
225 |
< |
* When some workers are known to be blocked (on joins or via |
226 |
< |
* ManagedBlocker), we may create or resume others to take their |
227 |
< |
* place until they unblock (see below). Implementing this |
228 |
< |
* requires counts of the number of "running" threads (i.e., those |
229 |
< |
* that are neither blocked nor artificially suspended) as well as
230 |
< |
* the total number. These two values are packed into one field, |
231 |
< |
* "workerCounts" because we need accurate snapshots when deciding |
232 |
< |
* to create, resume or suspend. Note however that the |
233 |
< |
* correspondence of these counts to reality is not guaranteed. In
234 |
< |
* particular updates for unblocked threads may lag until they |
235 |
< |
* actually wake up. |
236 |
< |
* |
237 |
< |
* 3. Maintaining global run state. The run state of the pool |
238 |
< |
* consists of a runLevel (SHUTDOWN, TERMINATING, etc) similar to |
239 |
< |
* those in other Executor implementations, as well as a count of |
240 |
< |
* "active" workers -- those that are, or soon will be, or |
241 |
< |
* recently were executing tasks. The runLevel and active count |
242 |
< |
* are packed together in order to correctly trigger shutdown and |
243 |
< |
* termination. Without care, active counts can be subject to very |
244 |
< |
* high contention. We substantially reduce this contention by |
245 |
< |
* relaxing update rules. A worker must claim active status |
246 |
< |
* prospectively, by activating if it sees that a submitted or |
247 |
< |
* stealable task exists (it may find after activating that the |
248 |
< |
* task no longer exists). It stays active while processing this |
249 |
< |
* task (if it exists) and any other local subtasks it produces, |
250 |
< |
* until it cannot find any other tasks. It then tries |
251 |
< |
* inactivating (see method preStep), but upon update contention |
252 |
< |
* instead scans for more tasks, later retrying inactivation if it |
253 |
< |
* doesn't find any. |
254 |
< |
* |
255 |
< |
* 4. Managing idle workers waiting for tasks. We cannot let |
256 |
< |
* workers spin indefinitely scanning for tasks when none are |
257 |
< |
* available. On the other hand, we must quickly prod them into |
258 |
< |
* action when new tasks are submitted or generated. We |
259 |
< |
* park/unpark these idle workers using an event-count scheme. |
260 |
< |
* Field eventCount is incremented upon events that may enable |
261 |
< |
* workers that previously could not find a task to now find one: |
262 |
< |
* Submission of a new task to the pool, or another worker pushing |
263 |
< |
* a task onto a previously empty queue. (We also use this |
264 |
< |
* mechanism for configuration and termination actions that |
265 |
< |
* require wakeups of idle workers). Each worker maintains its |
266 |
< |
* last known event count, and blocks when a scan for work did not |
267 |
< |
* find a task AND its lastEventCount matches the current |
268 |
< |
* eventCount. Waiting idle workers are recorded in a variant of |
269 |
< |
* Treiber stack headed by field eventWaiters which, when nonzero, |
270 |
< |
* encodes the thread index and count awaited for by the worker |
271 |
< |
* thread most recently calling eventSync. This thread in turn has |
272 |
< |
* a record (field nextEventWaiter) for the next waiting worker. |
273 |
< |
* In addition to allowing simpler decisions about need for |
274 |
< |
* wakeup, the event count bits in eventWaiters serve the role of |
275 |
< |
* tags to avoid ABA errors in Treiber stacks. Upon any wakeup, |
276 |
< |
* released threads also try to release at most two others. The |
277 |
< |
* net effect is a tree-like diffusion of signals, where released |
278 |
< |
* threads (and possibly others) help with unparks. To further |
279 |
< |
* reduce contention effects a bit, failed CASes to increment |
280 |
< |
* field eventCount are tolerated without retries in signalWork. |
281 |
< |
* Conceptually they are merged into the same event, which is OK |
282 |
< |
* when their only purpose is to enable workers to scan for work. |
283 |
< |
* |
284 |
< |
* 5. Managing suspension of extra workers. When a worker notices |
285 |
< |
* (usually upon timeout of a wait()) that there are too few |
286 |
< |
* running threads, we may create a new thread to maintain |
287 |
< |
* parallelism level, or at least avoid starvation. Usually, extra |
288 |
< |
* threads are needed for only very short periods, yet join |
289 |
< |
* dependencies are such that we sometimes need them in |
290 |
< |
* bursts. Rather than create new threads each time this happens, |
291 |
< |
* we suspend no-longer-needed extra ones as "spares". For most |
292 |
< |
* purposes, we don't distinguish "extra" spare threads from |
293 |
< |
* normal "core" threads: On each call to preStep (the only point |
294 |
< |
* at which we can do this) a worker checks to see if there are |
295 |
< |
* now too many running workers, and if so, suspends itself. |
296 |
< |
* Method helpMaintainParallelism looks for suspended threads to |
297 |
< |
* resume before considering creating a new replacement. The |
298 |
< |
* spares themselves are encoded on another variant of a Treiber |
299 |
< |
* Stack, headed at field "spareWaiters". Note that the use of |
300 |
< |
* spares is intrinsically racy. One thread may become a spare at |
301 |
< |
* about the same time as another is needlessly being created. We |
302 |
< |
* counteract this and related slop in part by requiring resumed |
303 |
< |
* spares to immediately recheck (in preStep) to see whether they |
304 |
< |
* should re-suspend.
305 |
< |
* |
306 |
< |
* 6. Killing off unneeded workers. A timeout mechanism is used to |
307 |
< |
* shed unused workers: The oldest (first) event queue waiter uses |
308 |
< |
* a timed rather than hard wait. When this wait times out without |
309 |
< |
* a normal wakeup, it tries to shutdown any one (for convenience |
310 |
< |
* the newest) other spare or event waiter via |
311 |
< |
* tryShutdownUnusedWorker. This eventually reduces the number of |
312 |
< |
* worker threads to a minimum of one after a long enough period |
313 |
< |
* without use. |
314 |
< |
* |
315 |
< |
* 7. Deciding when to create new workers. The main dynamic |
316 |
< |
* control in this class is deciding when to create extra threads |
317 |
< |
* in method helpMaintainParallelism. We would like to keep |
318 |
< |
* exactly #parallelism threads running, which is an impossible
319 |
< |
* task. We always need to create one when the number of running |
320 |
< |
* threads would become zero and all workers are busy. Beyond |
321 |
< |
* this, we must rely on heuristics that work well in the
322 |
< |
* presence of transient phenomena such as GC stalls, dynamic
323 |
< |
* compilation, and wake-up lags. These transients are extremely |
324 |
< |
* common -- we are normally trying to fully saturate the CPUs on |
325 |
< |
* a machine, so almost any activity other than running tasks |
326 |
< |
* impedes accuracy. Our main defense is to allow parallelism to |
327 |
< |
* lapse for a while during joins, and use a timeout to see if, |
328 |
< |
* after the resulting settling, there is still a need for |
329 |
< |
* additional workers. This also better copes with the fact that |
330 |
< |
* some of the methods in this class tend to never become compiled |
331 |
< |
* (but are interpreted), so some components of the entire set of |
332 |
< |
* controls might execute 100 times faster than others. And |
333 |
< |
* similarly for cases where the apparent lack of work is just due |
334 |
< |
* to GC stalls and other transient system activity. |
370 |
> |
* The algorithm in tryHelpStealer entails a form of "linear" |
371 |
> |
* helping: Each worker records (in field currentSteal) the most |
372 |
> |
* recent task it stole from some other worker. Plus, it records |
373 |
> |
* (in field currentJoin) the task it is currently actively |
374 |
> |
* joining. Method tryHelpStealer uses these markers to try to |
375 |
> |
* find a worker to help (i.e., steal back a task from and execute |
376 |
> |
* it) that could hasten completion of the actively joined task. |
377 |
> |
* In essence, the joiner executes a task that would be on its own |
378 |
> |
* local deque had the to-be-joined task not been stolen. This may |
379 |
> |
* be seen as a conservative variant of the approach in Wagner & |
380 |
> |
* Calder "Leapfrogging: a portable technique for implementing |
381 |
> |
* efficient futures" SIGPLAN Notices, 1993 |
382 |
> |
* (http://portal.acm.org/citation.cfm?id=155354). It differs in |
383 |
> |
* that: (1) We only maintain dependency links across workers upon |
384 |
> |
* steals, rather than use per-task bookkeeping. This sometimes |
385 |
> |
* requires a linear scan of workers array to locate stealers, but |
386 |
> |
* often doesn't because stealers leave hints (that may become |
387 |
> |
* stale/wrong) of where to locate them. A stealHint is only a |
388 |
> |
* hint because a worker might have had multiple steals and the |
389 |
> |
* hint records only one of them (usually the most current). |
390 |
> |
* Hinting isolates cost to when it is needed, rather than adding |
391 |
> |
* to per-task overhead. (2) It is "shallow", ignoring nesting |
392 |
> |
* and potentially cyclic mutual steals. (3) It is intentionally |
393 |
> |
* racy: field currentJoin is updated only while actively joining, |
394 |
> |
* which means that we miss links in the chain during long-lived |
395 |
> |
* tasks, GC stalls etc (which is OK since blocking in such cases |
396 |
> |
* is usually a good idea). (4) We bound the number of attempts |
397 |
> |
* to find work (see MAX_HELP_DEPTH) and fall back to suspending |
398 |
> |
* the worker and if necessary replacing it with another. |
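*
* Schematically (hypothetical helpers; the real code inlines the
* scan, staleness checks, and all bounds checks):
*
*   ForkJoinTask<?> j = w.currentJoin;
*   for (int d = 0; d < MAX_HELP_DEPTH && j != null && !j.isDone(); ++d) {
*       WorkQueue v = findStealer(j);   // via stealHint, else scan
*       if (v == null)
*           break;                      // chain stale; fall back
*       ForkJoinTask<?> t = v.poll();   // steal back and execute
*       if (t != null)
*           t.doExec();
*       j = v.currentJoin;              // follow joining->stealing link
*   }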
399 |
> |
* |
400 |
> |
* It is impossible to keep exactly the target parallelism number |
401 |
> |
* of threads running at any given time. Determining the |
402 |
> |
* existence of conservatively safe helping targets, the |
403 |
> |
* availability of already-created spares, and the apparent need |
404 |
> |
* to create new spares are all racy, so we rely on multiple |
405 |
> |
* retries of each. Currently, in keeping with on-demand |
406 |
> |
* signalling policy, we compensate only if blocking would leave |
407 |
> |
* less than one active (non-waiting, non-blocked) worker. |
408 |
> |
* Additionally, to avoid some false alarms due to GC, lagging |
409 |
> |
* counters, system activity, etc, compensated blocking for joins |
410 |
> |
* is only attempted after rechecks stabilize in |
411 |
> |
* ForkJoinTask.awaitJoin. (Retries are interspersed with |
412 |
> |
* Thread.yield, for good citizenship.) |
413 |
|
* |
414 |
< |
* Beware that there is a lot of representation-level coupling |
414 |
> |
* Style notes: There is a lot of representation-level coupling |
415 |
|
* among classes ForkJoinPool, ForkJoinWorkerThread, and |
416 |
< |
* ForkJoinTask. For example, direct access to "workers" array by |
417 |
< |
* workers, and direct access to ForkJoinTask.status by both |
418 |
< |
* ForkJoinPool and ForkJoinWorkerThread. There is little point |
419 |
< |
* trying to reduce this, since any associated future changes in |
420 |
< |
* representations will need to be accompanied by algorithmic |
421 |
< |
* changes anyway. |
422 |
< |
* |
423 |
< |
* Style notes: There are lots of inline assignments (of form |
424 |
< |
* "while ((local = field) != 0)") which are usually the simplest |
425 |
< |
* way to ensure the required read orderings (which are sometimes |
426 |
< |
* critical). Also several occurrences of the unusual "do {} |
427 |
< |
* while(!cas...)" which is the simplest way to force an update of |
428 |
< |
* a CAS'ed variable. There are also other coding oddities that |
429 |
< |
* help some methods perform reasonably even when interpreted (not |
430 |
< |
* compiled), at the expense of some messy constructions that |
431 |
< |
* reduce byte code counts. |
432 |
< |
* |
433 |
< |
* The order of declarations in this file is: (1) statics (2) |
434 |
< |
* fields (along with constants used when unpacking some of them) |
435 |
< |
* (3) internal control methods (4) callbacks and other support |
436 |
< |
* for ForkJoinTask and ForkJoinWorkerThread classes, (5) exported |
437 |
< |
* methods (plus a few little helpers). |
416 |
> |
* ForkJoinTask. The fields of WorkQueue maintain data structures |
417 |
> |
* managed by ForkJoinPool, so are directly accessed. There is |
418 |
> |
* little point trying to reduce this, since any associated future |
419 |
> |
* changes in representations will need to be accompanied by |
420 |
> |
* algorithmic changes anyway. All together, these low-level |
421 |
> |
* implementation choices produce as much as a factor of 4 |
422 |
> |
* performance improvement compared to naive implementations, and |
423 |
> |
* enable the processing of billions of tasks per second, at the |
424 |
> |
* expense of some ugliness. |
425 |
> |
* |
426 |
> |
* Methods signalWork() and scan() are the main bottlenecks so are |
427 |
> |
* especially heavily micro-optimized/mangled. There are lots of |
428 |
> |
* inline assignments (of form "while ((local = field) != 0)") |
429 |
> |
* which are usually the simplest way to ensure the required read |
430 |
> |
* orderings (which are sometimes critical). This leads to a |
431 |
> |
* "C"-like style of listing declarations of these locals at the |
432 |
> |
* heads of methods or blocks. There are several occurrences of |
433 |
> |
* the unusual "do {} while (!cas...)" which is the simplest way |
434 |
> |
* to force an update of a CAS'ed variable. There are also other |
435 |
> |
* coding oddities that help some methods perform reasonably even |
436 |
> |
* when interpreted (not compiled). |
437 |
> |
* |
438 |
> |
* The order of declarations in this file is: (1) declarations of |
439 |
> |
* statics (2) fields (along with constants used when unpacking |
440 |
> |
* some of them), listed in an order that tends to reduce |
441 |
> |
* contention among them a bit under most JVMs; (3) nested |
442 |
> |
* classes; (4) internal control methods; (5) callbacks and other |
443 |
> |
* support for ForkJoinTask methods; (6) exported methods (plus a |
444 |
> |
* few little helpers); (7) static block initializing all statics |
445 |
> |
* in a minimally dependent order. |
446 |
|
*/ |
447 |
|
|
448 |
|
/** |
477 |
|
* overridden in ForkJoinPool constructors. |
478 |
|
*/ |
479 |
|
public static final ForkJoinWorkerThreadFactory |
480 |
< |
defaultForkJoinWorkerThreadFactory = |
395 |
< |
new DefaultForkJoinWorkerThreadFactory(); |
480 |
> |
defaultForkJoinWorkerThreadFactory; |
481 |
|
|
482 |
|
/** |
483 |
|
* Permission required for callers of methods that may start or |
484 |
|
* kill threads. |
485 |
|
*/ |
486 |
< |
private static final RuntimePermission modifyThreadPermission = |
402 |
< |
new RuntimePermission("modifyThread"); |
486 |
> |
private static final RuntimePermission modifyThreadPermission; |
487 |
|
|
488 |
|
/** |
489 |
|
* If there is a security manager, makes sure caller has |
498 |
|
/** |
499 |
|
* Generator for assigning sequence numbers as pool names. |
500 |
|
*/ |
501 |
< |
private static final AtomicInteger poolNumberGenerator = |
418 |
< |
new AtomicInteger(); |
501 |
> |
private static final AtomicInteger poolNumberGenerator; |
502 |
|
|
503 |
|
/** |
504 |
< |
* The time to block in a join (see awaitJoin) before checking if |
505 |
< |
* a new worker should be (re)started to maintain parallelism |
506 |
< |
* level. The value should be short enough to maintain global
507 |
< |
* responsiveness and progress but long enough to avoid |
508 |
< |
* counterproductive firings during GC stalls or unrelated system |
509 |
< |
* activity, and to not bog down systems with continual re-firings |
510 |
< |
* on GCs or legitimately long waits. |
511 |
< |
*/ |
512 |
< |
private static final long JOIN_TIMEOUT_MILLIS = 250L; // 4 per second |
513 |
< |
|
514 |
< |
/** |
515 |
< |
* The wakeup interval (in nanoseconds) for the oldest worker |
516 |
< |
* waiting for an event invokes tryShutdownUnusedWorker to shrink
517 |
< |
* the number of workers. The exact value does not matter too |
518 |
< |
* much, but should be long enough to slowly release resources |
519 |
< |
* during long periods without use without disrupting normal use. |
504 |
> |
* Bits and masks for control variables |
505 |
> |
* |
506 |
> |
* Field ctl is a long packed with: |
507 |
> |
* AC: Number of active running workers minus target parallelism (16 bits) |
508 |
> |
* TC: Number of total workers minus target parallelism (16 bits) |
509 |
> |
* ST: true if pool is terminating (1 bit) |
510 |
> |
* EC: the wait count of top waiting thread (15 bits) |
511 |
> |
* ID: ~(poolIndex >>> 1) of top of Treiber stack of waiters (16 bits) |
512 |
> |
* |
513 |
> |
* When convenient, we can extract the upper 32 bits of counts and |
514 |
> |
* the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e = |
515 |
> |
* (int)ctl. The ec field is never accessed alone, but always |
516 |
> |
* together with id and st. The offsets of counts by the target |
517 |
> |
* parallelism and the positioning of fields make it possible to
518 |
> |
* perform the most common checks via sign tests of fields: When |
519 |
> |
* ac is negative, there are not enough active workers, when tc is |
520 |
> |
* negative, there are not enough total workers, when id is |
521 |
> |
* negative, there is at least one waiting worker, and when e is |
522 |
> |
* negative, the pool is terminating. To deal with these possibly |
523 |
> |
* negative fields, we use casts in and out of "short" and/or |
524 |
> |
* signed shifts to maintain signedness. |
525 |
> |
* |
526 |
> |
* When a thread is queued (inactivated), its eventCount field is |
527 |
> |
* negative, which is the only way to tell if a worker is |
528 |
> |
* prevented from executing tasks, even though it must continue to |
529 |
> |
* scan for them to avoid queuing races. |
530 |
> |
* |
531 |
> |
* Field runState is an int packed with: |
532 |
> |
* SHUTDOWN: true if shutdown is enabled (1 bit) |
533 |
> |
* SEQ: a sequence number updated upon (de)registering workers (15 bits) |
534 |
> |
* MASK: mask (power of 2 - 1) covering all registered poolIndexes (16 bits) |
535 |
> |
* |
536 |
> |
* The combination of mask and sequence number enables simple |
537 |
> |
* consistency checks: Staleness of read-only operations on the |
538 |
> |
* workers and queues arrays can be checked by comparing runState |
539 |
> |
* before vs after the reads. The low 16 bits (i.e., anding with
540 |
> |
* SMASK) hold the smallest power of two covering all worker
541 |
> |
* indices, minus one. The mask for queues (vs workers) is twice |
542 |
> |
* this value plus 1. |
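*
* For example, the sign tests above decode as (illustrative only,
* using the shift constants defined below):
*
*   int ac = (int)(ctl >> AC_SHIFT);    // ac < 0: too few active
*   int tc = (short)(ctl >>> TC_SHIFT); // tc < 0: too few total
*   int e  = (int)ctl;                  // e  < 0: pool terminating
*   int id = (short)ctl;                // id < 0: a waiter exists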
543 |
> |
*/ |
544 |
> |
|
545 |
> |
// bit positions/shifts for fields |
546 |
> |
private static final int AC_SHIFT = 48; |
547 |
> |
private static final int TC_SHIFT = 32; |
548 |
> |
private static final int ST_SHIFT = 31; |
549 |
> |
private static final int EC_SHIFT = 16; |
550 |
> |
|
551 |
> |
// bounds |
552 |
> |
private static final int MAX_ID = 0x7fff; // max poolIndex |
553 |
> |
private static final int SMASK = 0xffff; // mask short bits |
554 |
> |
private static final int SHORT_SIGN = 1 << 15; |
555 |
> |
private static final int INT_SIGN = 1 << 31; |
556 |
> |
|
557 |
> |
// masks |
558 |
> |
private static final long STOP_BIT = 0x0001L << ST_SHIFT; |
559 |
> |
private static final long AC_MASK = ((long)SMASK) << AC_SHIFT; |
560 |
> |
private static final long TC_MASK = ((long)SMASK) << TC_SHIFT; |
561 |
> |
|
562 |
> |
// units for incrementing and decrementing |
563 |
> |
private static final long TC_UNIT = 1L << TC_SHIFT; |
564 |
> |
private static final long AC_UNIT = 1L << AC_SHIFT; |
565 |
> |
|
566 |
> |
// masks and units for dealing with u = (int)(ctl >>> 32) |
567 |
> |
private static final int UAC_SHIFT = AC_SHIFT - 32; |
568 |
> |
private static final int UTC_SHIFT = TC_SHIFT - 32; |
569 |
> |
private static final int UAC_MASK = SMASK << UAC_SHIFT; |
570 |
> |
private static final int UTC_MASK = SMASK << UTC_SHIFT; |
571 |
> |
private static final int UAC_UNIT = 1 << UAC_SHIFT; |
572 |
> |
private static final int UTC_UNIT = 1 << UTC_SHIFT; |
573 |
> |
|
574 |
> |
// masks and units for dealing with e = (int)ctl |
575 |
> |
private static final int E_MASK = 0x7fffffff; // no STOP_BIT |
576 |
> |
private static final int E_SEQ = 1 << EC_SHIFT; |
577 |
> |
|
578 |
> |
// runState bits |
579 |
> |
private static final int SHUTDOWN = 1 << 31; |
580 |
> |
private static final int RS_SEQ = 1 << 16; |
581 |
> |
private static final int RS_SEQ_MASK = 0x7fff0000; |
582 |
> |
|
583 |
> |
// access mode for WorkQueue |
584 |
> |
static final int LIFO_QUEUE = 0; |
585 |
> |
static final int FIFO_QUEUE = 1; |
586 |
> |
static final int SHARED_QUEUE = -1; |
587 |
> |
|
588 |
> |
/** |
589 |
> |
* The wakeup interval (in nanoseconds) for a worker waiting for a |
590 |
> |
* task when the pool is quiescent to instead try to shrink the |
591 |
> |
* number of workers. The exact value does not matter too |
592 |
> |
* much. It must be short enough to release resources during |
593 |
> |
* sustained periods of idleness, but not so short that threads |
594 |
> |
* are continually re-created. |
595 |
> |
*/ |
596 |
> |
private static final long SHRINK_RATE = |
597 |
> |
4L * 1000L * 1000L * 1000L; // 4 seconds |
598 |
> |
|
599 |
> |
/** |
600 |
> |
* The timeout value for attempted shrinkage, includes |
601 |
> |
* some slop to cope with system timer imprecision. |
602 |
> |
*/ |
603 |
> |
private static final long SHRINK_TIMEOUT = SHRINK_RATE - (SHRINK_RATE / 10); |
604 |
> |
|
605 |
> |
/** |
606 |
> |
* The maximum stolen->joining link depth allowed in tryHelpStealer. |
607 |
> |
* Depths for legitimate chains are unbounded, but we use a fixed |
608 |
> |
* constant to avoid (otherwise unchecked) cycles and to bound |
609 |
> |
* staleness of traversal parameters at the expense of sometimes |
610 |
> |
* blocking when we could be helping. |
611 |
|
*/ |
612 |
< |
private static final long SHRINK_RATE_NANOS = |
439 |
< |
30L * 1000L * 1000L * 1000L; // 2 per minute |
612 |
> |
private static final int MAX_HELP_DEPTH = 16; |
613 |
|
|
614 |
< |
/** |
615 |
< |
* Absolute bound for parallelism level. Twice this number plus |
616 |
< |
* one (i.e., 0xffff) must fit into a 16bit field to enable
617 |
< |
* word-packing for some counts and indices. |
614 |
> |
/* |
615 |
> |
* Field layout order in this class tends to matter more than one |
616 |
> |
* would like. Runtime layout order is only loosely related to |
617 |
> |
* declaration order and may differ across JVMs, but the following |
618 |
> |
* empirically works OK on current JVMs. |
619 |
> |
*/ |
620 |
> |
|
621 |
> |
volatile long ctl; // main pool control |
622 |
> |
final int parallelism; // parallelism level |
623 |
> |
final int localMode; // per-worker scheduling mode |
624 |
> |
int nextPoolIndex; // hint used in registerWorker |
625 |
> |
volatile int runState; // shutdown status, seq, and mask |
626 |
> |
WorkQueue[] workQueues; // main registry |
627 |
> |
final ReentrantLock lock; // for registration |
628 |
> |
final Condition termination; // for awaitTermination |
629 |
> |
final ForkJoinWorkerThreadFactory factory; // factory for new workers |
630 |
> |
final Thread.UncaughtExceptionHandler ueh; // per-worker UEH |
631 |
> |
final AtomicLong stealCount; // collect counts when terminated |
632 |
> |
final AtomicInteger nextWorkerNumber; // to create worker name string |
633 |
> |
final String workerNamePrefix; // Prefix for assigning worker names |
634 |
> |
|
635 |
> |
/** |
636 |
> |
* Queues supporting work-stealing as well as external task |
637 |
> |
* submission. See above for main rationale and algorithms. |
638 |
> |
* Implementation relies heavily on "Unsafe" intrinsics |
639 |
> |
* and selective use of "volatile": |
640 |
> |
* |
641 |
> |
* Field "base" is the index (mod array.length) of the least valid |
642 |
> |
* queue slot, which is always the next position to steal (poll) |
643 |
> |
* from if nonempty. Reads and writes require volatile orderings |
644 |
> |
* but not CAS, because updates are only performed after slot |
645 |
> |
* CASes. |
646 |
> |
* |
647 |
> |
* Field "top" is the index (mod array.length) of the next queue |
648 |
> |
* slot to push to or pop from. It is written only by owner thread |
649 |
> |
* for push, or under lock for trySharedPush, and accessed by |
650 |
> |
* other threads only after reading (volatile) base. Both top and |
651 |
> |
* base are allowed to wrap around on overflow, but (top - base) |
652 |
> |
* (or more commonly -(base - top) to force volatile read of base |
653 |
> |
* before top) still estimates size. |
654 |
> |
* |
655 |
> |
* The array slots are read and written using the emulation of |
656 |
> |
* volatiles/atomics provided by Unsafe. Insertions must in |
657 |
> |
* general use putOrderedObject as a form of releasing store to |
658 |
> |
* ensure that all writes to the task object are ordered before |
659 |
> |
* its publication in the queue. (Although we can avoid one case |
660 |
> |
* of this when locked in trySharedPush.) All removals entail a |
661 |
> |
* CAS to null. The array is always a power of two. To ensure |
662 |
> |
* safety of Unsafe array operations, all accesses perform |
663 |
> |
* explicit null checks and implicit bounds checks via |
664 |
> |
* power-of-two masking. |
665 |
> |
* |
666 |
> |
* In addition to basic queuing support, this class contains |
667 |
> |
* fields described elsewhere to control execution. It turns out |
668 |
> |
* to work better memory-layout-wise to include them in this |
669 |
> |
* class rather than a separate class. |
670 |
> |
* |
671 |
> |
* Performance on most platforms is very sensitive to placement of |
672 |
> |
* instances of both WorkQueues and their arrays -- we absolutely |
673 |
> |
* do not want multiple WorkQueue instances or multiple queue |
674 |
> |
* arrays sharing cache lines. (It would be best for queue objects |
675 |
> |
* and their arrays to share, but there is nothing available to |
676 |
> |
* help arrange that). Unfortunately, because they are recorded |
677 |
> |
* in a common array, WorkQueue instances are often moved to be |
678 |
> |
* adjacent by garbage collectors. To reduce impact, we use field |
679 |
> |
* padding that works OK on common platforms; this effectively |
680 |
> |
* trades off slightly slower average field access for the sake of |
681 |
> |
* avoiding really bad worst-case access. (Until better JVM |
682 |
> |
* support is in place, this padding is dependent on transient |
683 |
> |
* properties of JVM field layout rules.) We also take care in |
684 |
> |
* allocating and sizing and resizing the array. Non-shared queue |
685 |
> |
* arrays are initialized (via method growArray) by workers before |
686 |
> |
* use. Others are allocated on first use. |
687 |
|
*/ |
688 |
< |
private static final int MAX_WORKERS = 0x7fff; |
688 |
> |
static final class WorkQueue { |
689 |
> |
/** |
690 |
> |
* Capacity of work-stealing queue array upon initialization. |
691 |
> |
* Must be a power of two; at least 4, but set larger to |
692 |
> |
* reduce cacheline sharing among queues. |
693 |
> |
*/ |
694 |
> |
static final int INITIAL_QUEUE_CAPACITY = 1 << 8; |
695 |
|
|
696 |
< |
/** |
697 |
< |
* Array holding all worker threads in the pool. Array size must |
698 |
< |
* be a power of two. Updates and replacements are protected by |
699 |
< |
* workerLock, but the array is always kept in a consistent enough |
700 |
< |
* state to be randomly accessed without locking by workers |
701 |
< |
* performing work-stealing, as well as other traversal-based |
702 |
< |
* methods in this class. All readers must tolerate that some |
703 |
< |
* array slots may be null. |
456 |
< |
*/ |
457 |
< |
volatile ForkJoinWorkerThread[] workers; |
696 |
> |
/** |
697 |
> |
* Maximum size for queue arrays. Must be a power of two less |
698 |
> |
* than or equal to 1 << (31 - width of array entry) to ensure |
699 |
> |
* lack of wraparound of index calculations, but defined to a |
700 |
> |
* value a bit less than this to help users trap runaway |
701 |
> |
* programs before saturating systems. |
702 |
> |
*/ |
703 |
> |
static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M |
704 |
|
|
705 |
< |
/** |
706 |
< |
* Queue for external submissions. |
707 |
< |
*/ |
708 |
< |
private final LinkedTransferQueue<ForkJoinTask<?>> submissionQueue; |
705 |
> |
volatile long totalSteals; // cumulative number of steals |
706 |
> |
int seed; // for random scanning; initialize nonzero |
707 |
> |
volatile int eventCount; // encoded inactivation count; < 0 if inactive |
708 |
> |
int nextWait; // encoded record of next event waiter |
709 |
> |
int rescans; // remaining scans until block |
710 |
> |
int nsteals; // top-level task executions since last idle |
711 |
> |
final int mode; // lifo, fifo, or shared |
712 |
> |
int poolIndex; // index of this queue in pool (or 0) |
713 |
> |
int stealHint; // index of most recent known stealer |
714 |
> |
volatile int runState; // 1: locked, -1: terminate; else 0 |
715 |
> |
volatile int base; // index of next slot for poll |
716 |
> |
int top; // index of next slot for push |
717 |
> |
ForkJoinTask<?>[] array; // the elements (initially unallocated) |
718 |
> |
final ForkJoinWorkerThread owner; // owning thread or null if shared |
719 |
> |
volatile Thread parker; // == owner during call to park; else null |
720 |
> |
ForkJoinTask<?> currentJoin; // task being joined in awaitJoin |
721 |
> |
ForkJoinTask<?> currentSteal; // current non-local task being executed |
722 |
> |
// Heuristic padding to ameliorate unfortunate memory placements |
723 |
> |
Object p00, p01, p02, p03, p04, p05, p06, p07, p08, p09, p0a; |
724 |
> |
|
725 |
> |
WorkQueue(ForkJoinWorkerThread owner, int mode) { |
726 |
> |
this.owner = owner; |
727 |
> |
this.mode = mode; |
728 |
> |
// Place indices in the center of array (that is not yet allocated) |
729 |
> |
base = top = INITIAL_QUEUE_CAPACITY >>> 1; |
730 |
> |
} |
731 |
|
|
732 |
< |
/** |
733 |
< |
* Lock protecting updates to workers array. |
734 |
< |
*/ |
735 |
< |
private final ReentrantLock workerLock; |
732 |
> |
/** |
733 |
> |
* Returns the number of tasks in the queue. |
734 |
> |
*/ |
735 |
> |
final int queueSize() { |
736 |
> |
int n = base - top; // non-owner callers must read base first |
737 |
> |
return (n >= 0) ? 0 : -n; |
738 |
> |
} |
739 |
|
|
740 |
< |
/** |
741 |
< |
* Latch released upon termination. |
742 |
< |
*/ |
743 |
< |
private final Phaser termination; |
740 |
> |
/** |
741 |
> |
* Pushes a task. Call only by owner in unshared queues. |
742 |
> |
* |
743 |
> |
* @param task the task. Caller must ensure non-null. |
744 |
> |
* @param p if non-null, pool to signal if necessary |
745 |
> |
* @throws RejectedExecutionException if array cannot |
746 |
> |
* be resized |
747 |
> |
*/ |
748 |
> |
final void push(ForkJoinTask<?> task, ForkJoinPool p) { |
749 |
> |
ForkJoinTask<?>[] a; |
750 |
> |
int s = top, m, n; |
751 |
> |
if ((a = array) != null) { // ignore if queue removed |
752 |
> |
U.putOrderedObject |
753 |
> |
(a, (((m = a.length - 1) & s) << ASHIFT) + ABASE, task); |
754 |
> |
if ((n = (top = s + 1) - base) <= 2) { |
755 |
> |
if (p != null) |
756 |
> |
p.signalWork(); |
757 |
> |
} |
758 |
> |
else if (n >= m) |
759 |
> |
growArray(true); |
760 |
> |
} |
761 |
> |
} |
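For orientation, the index arithmetic push relies on can be shown with plain array indexing: because the array length is a power of two, (a.length - 1) & s selects the slot, which is what the Unsafe address computation ((m & s) << ASHIFT) + ABASE encodes. A minimal, hypothetical single-owner sketch (SimpleDeque is illustrative, not part of this class):

    final class SimpleDeque {
        Object[] array = new Object[1 << 8];  // power of two, like INITIAL_QUEUE_CAPACITY
        int base, top;                        // next slot to poll / next slot to push

        void push(Object task) {
            int m = array.length - 1;         // mask; valid only for power-of-two lengths
            array[m & top] = task;            // plain indexing in place of Unsafe address math
            if (++top - base >= m)            // nearly full; the real queue grows here
                throw new IllegalStateException("resize needed");
        }
    }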
762 |
|
|
763 |
< |
/** |
764 |
< |
* Creation factory for worker threads. |
765 |
< |
*/ |
766 |
< |
private final ForkJoinWorkerThreadFactory factory; |
763 |
> |
/** |
764 |
> |
* Pushes a task if lock is free and array is either big |
765 |
> |
* enough or can be resized to be big enough. |
766 |
> |
* |
767 |
> |
* @param task the task. Caller must ensure non-null. |
768 |
> |
* @return true if submitted |
769 |
> |
*/ |
770 |
> |
final boolean trySharedPush(ForkJoinTask<?> task) { |
771 |
> |
boolean submitted = false; |
772 |
> |
if (runState == 0 && U.compareAndSwapInt(this, RUNSTATE, 0, 1)) { |
773 |
> |
ForkJoinTask<?>[] a = array; |
774 |
> |
int s = top, n = s - base; |
775 |
> |
try { |
776 |
> |
if ((a != null && n < a.length - 1) || |
777 |
> |
(a = growArray(false)) != null) { // must presize |
778 |
> |
int j = (((a.length - 1) & s) << ASHIFT) + ABASE; |
779 |
> |
U.putObject(a, (long)j, task); // don't need "ordered" |
780 |
> |
top = s + 1; |
781 |
> |
submitted = true; |
782 |
> |
} |
783 |
> |
} finally { |
784 |
> |
runState = 0; // unlock |
785 |
> |
} |
786 |
> |
} |
787 |
> |
return submitted; |
788 |
> |
} |
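The runState CAS above acts as a single-word try-lock. A hedged sketch of the same acquire/release protocol using AtomicInteger (CasTryLock is a hypothetical stand-in):

    import java.util.concurrent.atomic.AtomicInteger;

    final class CasTryLock {
        private final AtomicInteger state = new AtomicInteger(); // 0 = unlocked

        boolean tryRun(Runnable action) {
            if (state.get() == 0 && state.compareAndSet(0, 1)) { // acquire
                try {
                    action.run();   // e.g. publish a task into the array
                    return true;
                } finally {
                    state.set(0);   // release; volatile write orders prior stores
                }
            }
            return false;           // busy: caller sweeps to another queue
        }
    }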
789 |
|
|
790 |
< |
/** |
791 |
< |
* Sum of per-thread steal counts, updated only when threads are |
792 |
< |
* idle or terminating. |
793 |
< |
*/ |
794 |
< |
private volatile long stealCount; |
790 |
> |
/** |
791 |
> |
* Takes next task, if one exists, in FIFO order. |
792 |
> |
*/ |
793 |
> |
final ForkJoinTask<?> poll() { |
794 |
> |
ForkJoinTask<?>[] a; int b, i; |
795 |
> |
while ((b = base) - top < 0 && (a = array) != null && |
796 |
> |
(i = (a.length - 1) & b) >= 0) { |
797 |
> |
int j = (i << ASHIFT) + ABASE; |
798 |
> |
ForkJoinTask<?> t = (ForkJoinTask<?>)U.getObjectVolatile(a, j); |
799 |
> |
if (t != null && base == b && |
800 |
> |
U.compareAndSwapObject(a, j, t, null)) { |
801 |
> |
base = b + 1; |
802 |
> |
return t; |
803 |
> |
} |
804 |
> |
} |
805 |
> |
return null; |
806 |
> |
} |
807 |
|
|
808 |
< |
/** |
809 |
< |
* Encoded record of top of treiber stack of threads waiting for |
810 |
< |
* events. The top 32 bits contain the count being waited for. The |
811 |
< |
* bottom 16 bits contains one plus the pool index of waiting |
812 |
< |
* worker thread. (Bits 16-31 are unused.) |
813 |
< |
*/ |
814 |
< |
private volatile long eventWaiters; |
808 |
> |
/** |
809 |
> |
* Takes next task, if one exists, in LIFO order. |
810 |
> |
* Call only by owner in unshared queues. |
811 |
> |
*/ |
812 |
> |
final ForkJoinTask<?> pop() { |
813 |
> |
ForkJoinTask<?> t; int m; |
814 |
> |
ForkJoinTask<?>[] a = array; |
815 |
> |
if (a != null && (m = a.length - 1) >= 0) { |
816 |
> |
for (int s; (s = top - 1) - base >= 0;) { |
817 |
> |
int j = ((m & s) << ASHIFT) + ABASE; |
818 |
> |
if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) == null) |
819 |
> |
break; |
820 |
> |
if (U.compareAndSwapObject(a, j, t, null)) { |
821 |
> |
top = s; |
822 |
> |
return t; |
823 |
> |
} |
824 |
> |
} |
825 |
> |
} |
826 |
> |
return null; |
827 |
> |
} |
828 |
|
|
829 |
< |
private static final int EVENT_COUNT_SHIFT = 32; |
830 |
< |
private static final long WAITER_ID_MASK = (1L << 16) - 1L; |
829 |
> |
/** |
830 |
> |
* Takes next task, if one exists, in order specified by mode. |
831 |
> |
*/ |
832 |
> |
final ForkJoinTask<?> nextLocalTask() { |
833 |
> |
return mode == 0 ? pop() : poll(); |
834 |
> |
} |
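The mode flag amounts to choosing which end of the deque the owner consumes from. An illustration with ArrayDeque standing in for the work array:

    import java.util.ArrayDeque;

    public class ModeDemo {
        public static void main(String[] args) {
            ArrayDeque<String> q = new ArrayDeque<>();
            q.addLast("a"); q.addLast("b"); q.addLast("c"); // three pushes
            System.out.println(q.pollLast());  // c -- mode == 0: LIFO pop() from top
            System.out.println(q.pollFirst()); // a -- asyncMode: FIFO poll() from base
        }
    }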
835 |
|
|
836 |
< |
/** |
837 |
< |
* A counter for events that may wake up worker threads: |
838 |
< |
* - Submission of a new task to the pool |
839 |
< |
* - A worker pushing a task on an empty queue |
840 |
< |
* - termination |
841 |
< |
*/ |
842 |
< |
private volatile int eventCount; |
836 |
> |
/** |
837 |
> |
* Returns next task, if one exists, in order specified by mode. |
838 |
> |
*/ |
839 |
> |
final ForkJoinTask<?> peek() { |
840 |
> |
ForkJoinTask<?>[] a = array; int m; |
841 |
> |
if (a == null || (m = a.length - 1) < 0) |
842 |
> |
return null; |
843 |
> |
int i = mode == 0 ? top - 1 : base; |
844 |
> |
int j = ((i & m) << ASHIFT) + ABASE; |
845 |
> |
return (ForkJoinTask<?>)U.getObjectVolatile(a, j); |
846 |
> |
} |
847 |
|
|
848 |
< |
/** |
849 |
< |
* Encoded record of top of treiber stack of spare threads waiting |
850 |
< |
* for resumption. The top 16 bits contain an arbitrary count to |
851 |
< |
* avoid ABA effects. The bottom 16bits contains one plus the pool |
852 |
< |
* index of waiting worker thread. |
853 |
< |
*/ |
854 |
< |
private volatile int spareWaiters; |
848 |
> |
/** |
849 |
> |
* Returns task at index b if b is current base of queue. |
850 |
> |
*/ |
851 |
> |
final ForkJoinTask<?> pollAt(int b) { |
852 |
> |
ForkJoinTask<?>[] a; int i; |
853 |
> |
ForkJoinTask<?> task = null; |
854 |
> |
if ((a = array) != null && (i = ((a.length - 1) & b)) >= 0) { |
855 |
> |
int j = (i << ASHIFT) + ABASE; |
856 |
> |
ForkJoinTask<?> t = (ForkJoinTask<?>)U.getObjectVolatile(a, j); |
857 |
> |
if (t != null && base == b && |
858 |
> |
U.compareAndSwapObject(a, j, t, null)) { |
859 |
> |
base = b + 1; |
860 |
> |
task = t; |
861 |
> |
} |
862 |
> |
} |
863 |
> |
return task; |
864 |
> |
} |
865 |
|
|
866 |
< |
private static final int SPARE_COUNT_SHIFT = 16; |
867 |
< |
private static final int SPARE_ID_MASK = (1 << 16) - 1; |
866 |
> |
/** |
867 |
> |
* Pops the given task only if it is at the current top. |
868 |
> |
*/ |
869 |
> |
final boolean tryUnpush(ForkJoinTask<?> t) { |
870 |
> |
ForkJoinTask<?>[] a; int s; |
871 |
> |
if ((a = array) != null && (s = top) != base && |
872 |
> |
U.compareAndSwapObject |
873 |
> |
(a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) { |
874 |
> |
top = s; |
875 |
> |
return true; |
876 |
> |
} |
877 |
> |
return false; |
878 |
> |
} |
879 |
|
|
880 |
< |
/** |
881 |
< |
* Lifecycle control. The low word contains the number of workers |
882 |
< |
* that are (probably) executing tasks. This value is atomically |
883 |
< |
* incremented before a worker gets a task to run, and decremented |
884 |
< |
* when worker has no tasks and cannot find any. Bits 16-18 |
885 |
< |
* contain runLevel value. When all are zero, the pool is |
886 |
< |
* running. Level transitions are monotonic (running -> shutdown |
887 |
< |
* -> terminating -> terminated) so each transition adds a bit. |
888 |
< |
* These are bundled together to ensure consistent read for |
889 |
< |
* termination checks (i.e., that runLevel is at least SHUTDOWN |
890 |
< |
* and active threads is zero). |
891 |
< |
* |
892 |
< |
* Notes: Most direct CASes are dependent on these bitfield |
893 |
< |
* positions. Also, this field is non-private to enable direct |
894 |
< |
* performance-sensitive CASes in ForkJoinWorkerThread. |
895 |
< |
*/ |
531 |
< |
volatile int runState; |
880 |
> |
/** |
881 |
> |
* Polls the given task only if it is at the current base. |
882 |
> |
*/ |
883 |
> |
final boolean pollFor(ForkJoinTask<?> task) { |
884 |
> |
ForkJoinTask<?>[] a; int b, i; |
885 |
> |
if ((b = base) - top < 0 && (a = array) != null && |
886 |
> |
(i = (a.length - 1) & b) >= 0) { |
887 |
> |
int j = (i << ASHIFT) + ABASE; |
888 |
> |
if (U.getObjectVolatile(a, j) == task && base == b && |
889 |
> |
U.compareAndSwapObject(a, j, task, null)) { |
890 |
> |
base = b + 1; |
891 |
> |
return true; |
892 |
> |
} |
893 |
> |
} |
894 |
> |
return false; |
895 |
> |
} |
896 |
|
|
897 |
< |
// Note: The order among run level values matters. |
898 |
< |
private static final int RUNLEVEL_SHIFT = 16; |
899 |
< |
private static final int SHUTDOWN = 1 << RUNLEVEL_SHIFT; |
900 |
< |
private static final int TERMINATING = 1 << (RUNLEVEL_SHIFT + 1); |
901 |
< |
private static final int TERMINATED = 1 << (RUNLEVEL_SHIFT + 2); |
902 |
< |
private static final int ACTIVE_COUNT_MASK = (1 << RUNLEVEL_SHIFT) - 1; |
897 |
> |
/** |
898 |
> |
* If present, removes from queue and executes the given task, or |
899 |
> |
* any other cancelled task. Returns true immediately on any CAS |
900 |
> |
* or consistency check failure so caller can retry. |
901 |
> |
* |
902 |
> |
* @return false if no progress can be made |
903 |
> |
*/ |
904 |
> |
final boolean tryRemoveAndExec(ForkJoinTask<?> task) { |
905 |
> |
boolean removed = false, empty = true, progress = true; |
906 |
> |
ForkJoinTask<?>[] a; int m, s, b, n; |
907 |
> |
if ((a = array) != null && (m = a.length - 1) >= 0 && |
908 |
> |
(n = (s = top) - (b = base)) > 0) { |
909 |
> |
for (ForkJoinTask<?> t;;) { // traverse from s to b |
910 |
> |
int j = ((--s & m) << ASHIFT) + ABASE; |
911 |
> |
t = (ForkJoinTask<?>)U.getObjectVolatile(a, j); |
912 |
> |
if (t == null) // inconsistent length |
913 |
> |
break; |
914 |
> |
else if (t == task) { |
915 |
> |
if (s + 1 == top) { // pop |
916 |
> |
if (!U.compareAndSwapObject(a, j, task, null)) |
917 |
> |
break; |
918 |
> |
top = s; |
919 |
> |
removed = true; |
920 |
> |
} |
921 |
> |
else if (base == b) // replace with proxy |
922 |
> |
removed = U.compareAndSwapObject(a, j, task, |
923 |
> |
new EmptyTask()); |
924 |
> |
break; |
925 |
> |
} |
926 |
> |
else if (t.status >= 0) |
927 |
> |
empty = false; |
928 |
> |
else if (s + 1 == top) { // pop and throw away |
929 |
> |
if (U.compareAndSwapObject(a, j, t, null)) |
930 |
> |
top = s; |
931 |
> |
break; |
932 |
> |
} |
933 |
> |
if (--n == 0) { |
934 |
> |
if (!empty && base == b) |
935 |
> |
progress = false; |
936 |
> |
break; |
937 |
> |
} |
938 |
> |
} |
939 |
> |
} |
940 |
> |
if (removed) |
941 |
> |
task.doExec(); |
942 |
> |
return progress; |
943 |
> |
} |
944 |
|
|
945 |
< |
/** |
946 |
< |
* Holds number of total (i.e., created and not yet terminated) |
947 |
< |
* and running (i.e., not blocked on joins or other managed sync) |
948 |
< |
* threads, packed together to ensure consistent snapshot when |
949 |
< |
* making decisions about creating and suspending spare |
950 |
< |
* threads. Updated only by CAS. Note that adding a new worker |
951 |
< |
* requires incrementing both counts, since workers start off in |
952 |
< |
* running state. |
953 |
< |
*/ |
954 |
< |
private volatile int workerCounts; |
945 |
> |
/** |
946 |
> |
* Initializes or doubles the capacity of array. Call either |
947 |
> |
* by owner or with lock held -- it is OK for base, but not |
948 |
> |
* top, to move while resizings are in progress. |
949 |
> |
* |
950 |
> |
* @param rejectOnFailure if true, throw exception if capacity |
951 |
> |
* exceeded (relayed ultimately to user); else return null. |
952 |
> |
*/ |
953 |
> |
final ForkJoinTask<?>[] growArray(boolean rejectOnFailure) { |
954 |
> |
ForkJoinTask<?>[] oldA = array; |
955 |
> |
int size = oldA != null ? oldA.length << 1 : INITIAL_QUEUE_CAPACITY; |
956 |
> |
if (size <= MAXIMUM_QUEUE_CAPACITY) { |
957 |
> |
int oldMask, t, b; |
958 |
> |
ForkJoinTask<?>[] a = array = new ForkJoinTask<?>[size]; |
959 |
> |
if (oldA != null && (oldMask = oldA.length - 1) >= 0 && |
960 |
> |
(t = top) - (b = base) > 0) { |
961 |
> |
int mask = size - 1; |
962 |
> |
do { |
963 |
> |
ForkJoinTask<?> x; |
964 |
> |
int oldj = ((b & oldMask) << ASHIFT) + ABASE; |
965 |
> |
int j = ((b & mask) << ASHIFT) + ABASE; |
966 |
> |
x = (ForkJoinTask<?>)U.getObjectVolatile(oldA, oldj); |
967 |
> |
if (x != null && |
968 |
> |
U.compareAndSwapObject(oldA, oldj, x, null)) |
969 |
> |
U.putObjectVolatile(a, j, x); |
970 |
> |
} while (++b != t); |
971 |
> |
} |
972 |
> |
return a; |
973 |
> |
} |
974 |
> |
else if (!rejectOnFailure) |
975 |
> |
return null; |
976 |
> |
else |
977 |
> |
throw new RejectedExecutionException("Queue capacity exceeded"); |
978 |
> |
} |
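The copy loop preserves each element's logical index b while its physical slot changes from (b & oldMask) to (b & newMask). The re-masking in isolation (plain arrays, hypothetical helper):

    final class GrowDemo {
        static Object[] doubleCapacity(Object[] oldA, int base, int top) {
            Object[] a = new Object[oldA.length << 1];
            int oldMask = oldA.length - 1, mask = a.length - 1;
            for (int b = base; b != top; ++b)    // walk logical indices base..top-1
                a[b & mask] = oldA[b & oldMask]; // same element, new slot
            return a;
        }
    }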
979 |
|
|
980 |
< |
private static final int TOTAL_COUNT_SHIFT = 16; |
981 |
< |
private static final int RUNNING_COUNT_MASK = (1 << TOTAL_COUNT_SHIFT) - 1; |
982 |
< |
private static final int ONE_RUNNING = 1; |
983 |
< |
private static final int ONE_TOTAL = 1 << TOTAL_COUNT_SHIFT; |
980 |
> |
/** |
981 |
> |
* Removes and cancels all known tasks, ignoring any exceptions |
982 |
> |
*/ |
983 |
> |
final void cancelAll() { |
984 |
> |
ForkJoinTask.cancelIgnoringExceptions(currentJoin); |
985 |
> |
ForkJoinTask.cancelIgnoringExceptions(currentSteal); |
986 |
> |
for (ForkJoinTask<?> t; (t = poll()) != null; ) |
987 |
> |
ForkJoinTask.cancelIgnoringExceptions(t); |
988 |
> |
} |
989 |
|
|
990 |
< |
/** |
557 |
< |
* The target parallelism level. |
558 |
< |
* Accessed directly by ForkJoinWorkerThreads. |
559 |
< |
*/ |
560 |
< |
final int parallelism; |
990 |
> |
// Execution methods |
991 |
|
|
992 |
< |
/** |
993 |
< |
* True if use local fifo, not default lifo, for local polling |
994 |
< |
* Read by, and replicated by ForkJoinWorkerThreads |
995 |
< |
*/ |
996 |
< |
final boolean locallyFifo; |
992 |
> |
/** |
993 |
> |
* Removes and runs tasks until empty, using local mode |
994 |
> |
* ordering. |
995 |
> |
*/ |
996 |
> |
final void runLocalTasks() { |
997 |
> |
if (base - top < 0) { |
998 |
> |
for (ForkJoinTask<?> t; (t = nextLocalTask()) != null; ) |
999 |
> |
t.doExec(); |
1000 |
> |
} |
1001 |
> |
} |
1002 |
|
|
1003 |
< |
/** |
1004 |
< |
* The uncaught exception handler used when any worker abruptly |
1005 |
< |
* terminates. |
1006 |
< |
*/ |
1007 |
< |
private final Thread.UncaughtExceptionHandler ueh; |
1003 |
> |
/** |
1004 |
> |
* Executes a top-level task and any local tasks remaining |
1005 |
> |
* after execution. |
1006 |
> |
* |
1007 |
> |
* @return true unless terminating |
1008 |
> |
*/ |
1009 |
> |
final boolean runTask(ForkJoinTask<?> t) { |
1010 |
> |
boolean alive = true; |
1011 |
> |
if (t != null) { |
1012 |
> |
currentSteal = t; |
1013 |
> |
t.doExec(); |
1014 |
> |
runLocalTasks(); |
1015 |
> |
++nsteals; |
1016 |
> |
currentSteal = null; |
1017 |
> |
} |
1018 |
> |
else if (runState < 0) // terminating |
1019 |
> |
alive = false; |
1020 |
> |
return alive; |
1021 |
> |
} |
1022 |
|
|
1023 |
< |
/** |
1024 |
< |
* Pool number, just for assigning useful names to worker threads |
1025 |
< |
*/ |
1026 |
< |
private final int poolNumber; |
1023 |
> |
/** |
1024 |
> |
* Executes a non-top-level (stolen) task |
1025 |
> |
*/ |
1026 |
> |
final void runSubtask(ForkJoinTask<?> t) { |
1027 |
> |
if (t != null) { |
1028 |
> |
ForkJoinTask<?> ps = currentSteal; |
1029 |
> |
currentSteal = t; |
1030 |
> |
t.doExec(); |
1031 |
> |
currentSteal = ps; |
1032 |
> |
} |
1033 |
> |
} |
1034 |
|
|
1035 |
< |
// Utilities for CASing fields. Note that most of these |
1036 |
< |
// are usually manually inlined by callers |
1035 |
> |
/** |
1036 |
> |
* Computes next value for random probes. Scans don't require |
1037 |
> |
* a very high quality generator, but also not a crummy one. |
1038 |
> |
* Marsaglia xor-shift is cheap and works well enough. Note: |
1039 |
> |
* This is manually inlined in several usages in ForkJoinPool |
1040 |
> |
* to avoid writes inside busy scan loops. |
1041 |
> |
*/ |
1042 |
> |
final int nextSeed() { |
1043 |
> |
int r = seed; |
1044 |
> |
r ^= r << 13; |
1045 |
> |
r ^= r >>> 17; |
1046 |
> |
r ^= r << 5; |
1047 |
> |
return seed = r; |
1048 |
> |
} |
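The 13/17/5 shift triple yields a full-period generator: any nonzero seed cycles through all 2^32 - 1 nonzero ints, which is why seeds must be initialized nonzero. Standalone:

    public class XorShiftDemo {
        public static void main(String[] args) {
            int r = 0x9e3779b9;  // arbitrary nonzero seed
            for (int i = 0; i < 4; i++) {
                r ^= r << 13; r ^= r >>> 17; r ^= r << 5; // same triple as nextSeed()
                System.out.printf("%08x%n", r);
            }
        }
    }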
1049 |
|
|
1050 |
< |
/** |
1051 |
< |
* Increments running count part of workerCounts |
1052 |
< |
*/ |
1053 |
< |
final void incrementRunningCount() { |
1054 |
< |
int c; |
1055 |
< |
do {} while (!UNSAFE.compareAndSwapInt(this, workerCountsOffset, |
1056 |
< |
c = workerCounts, |
1057 |
< |
c + ONE_RUNNING)); |
1050 |
> |
// Unsafe mechanics |
1051 |
> |
private static final sun.misc.Unsafe U; |
1052 |
> |
private static final long RUNSTATE; |
1053 |
> |
private static final int ABASE; |
1054 |
> |
private static final int ASHIFT; |
1055 |
> |
static { |
1056 |
> |
int s; |
1057 |
> |
try { |
1058 |
> |
U = getUnsafe(); |
1059 |
> |
Class<?> k = WorkQueue.class; |
1060 |
> |
Class<?> ak = ForkJoinTask[].class; |
1061 |
> |
RUNSTATE = U.objectFieldOffset |
1062 |
> |
(k.getDeclaredField("runState")); |
1063 |
> |
ABASE = U.arrayBaseOffset(ak); |
1064 |
> |
s = U.arrayIndexScale(ak); |
1065 |
> |
} catch (Exception e) { |
1066 |
> |
throw new Error(e); |
1067 |
> |
} |
1068 |
> |
if ((s & (s-1)) != 0) |
1069 |
> |
throw new Error("data type scale not a power of two"); |
1070 |
> |
ASHIFT = 31 - Integer.numberOfLeadingZeros(s); |
1071 |
> |
} |
1072 |
|
} |
1073 |
|
|
1074 |
|
/** |
1075 |
< |
* Tries to decrement running count unless already zero |
1075 |
> |
* Class for artificial tasks that are used to replace the target |
1076 |
> |
* of local joins if they are removed from an interior queue slot |
1077 |
> |
* in WorkQueue.tryRemoveAndExec. We don't need the proxy to |
1078 |
> |
* actually do anything beyond having a unique identity. |
1079 |
|
*/ |
1080 |
< |
final boolean tryDecrementRunningCount() { |
1081 |
< |
int wc = workerCounts; |
1082 |
< |
if ((wc & RUNNING_COUNT_MASK) == 0) |
1083 |
< |
return false; |
1084 |
< |
return UNSAFE.compareAndSwapInt(this, workerCountsOffset, |
600 |
< |
wc, wc - ONE_RUNNING); |
1080 |
> |
static final class EmptyTask extends ForkJoinTask<Void> { |
1081 |
> |
EmptyTask() { status = ForkJoinTask.NORMAL; } // force done |
1082 |
> |
public Void getRawResult() { return null; } |
1083 |
> |
public void setRawResult(Void x) {} |
1084 |
> |
public boolean exec() { return true; } |
1085 |
|
} |
1086 |
|
|
1087 |
|
/** |
1088 |
< |
* Forces decrement of encoded workerCounts, awaiting nonzero if |
1089 |
< |
* (rarely) necessary when other count updates lag. |
1090 |
< |
* |
607 |
< |
* @param dr -- either zero or ONE_RUNNING |
608 |
< |
* @param dt == either zero or ONE_TOTAL |
1088 |
> |
* Computes a hash code for the given thread. This method is |
1089 |
> |
* expected to provide higher-quality hash codes than those using |
1090 |
> |
* method hashCode(). |
1091 |
|
*/ |
1092 |
< |
private void decrementWorkerCounts(int dr, int dt) { |
1093 |
< |
for (;;) { |
1094 |
< |
int wc = workerCounts; |
1095 |
< |
if ((wc & RUNNING_COUNT_MASK) - dr < 0 || |
1096 |
< |
(wc >>> TOTAL_COUNT_SHIFT) - dt < 0) { |
1097 |
< |
if ((runState & TERMINATED) != 0) |
1098 |
< |
return; // lagging termination on a backout |
1099 |
< |
Thread.yield(); |
618 |
< |
} |
619 |
< |
if (UNSAFE.compareAndSwapInt(this, workerCountsOffset, |
620 |
< |
wc, wc - (dr + dt))) |
621 |
< |
return; |
622 |
< |
} |
1092 |
> |
static final int hashThread(Thread t) { |
1093 |
> |
long id = (t == null) ? 0L : t.getId(); // Use MurmurHash of thread id |
1094 |
> |
int h = (int)id ^ (int)(id >>> 32); |
1095 |
> |
h ^= h >>> 16; |
1096 |
> |
h *= 0x85ebca6b; |
1097 |
> |
h ^= h >>> 13; |
1098 |
> |
h *= 0xc2b2ae35; |
1099 |
> |
return h ^ (h >>> 16); |
1100 |
|
} |
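Thread ids are small sequential longs, so unmixed they would collide in the low bits used for queue indexing; the finalizer scatters them. A quick check using the same constants as hashThread:

    public class HashSpreadDemo {
        static int mix(long id) {               // same steps as hashThread
            int h = (int) id ^ (int) (id >>> 32);
            h ^= h >>> 16; h *= 0x85ebca6b;
            h ^= h >>> 13; h *= 0xc2b2ae35;
            return h ^ (h >>> 16);
        }
        public static void main(String[] args) {
            for (long id = 1; id <= 4; id++)    // sequential ids, scattered slots
                System.out.printf("id=%d -> slot %d%n", id, mix(id) & 15);
        }
    }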
1101 |
|
|
1102 |
|
/** |
1103 |
< |
* Tries decrementing active count; fails on contention. |
627 |
< |
* Called when workers cannot find tasks to run. |
1103 |
> |
* Top-level runloop for workers |
1104 |
|
*/ |
1105 |
< |
final boolean tryDecrementActiveCount() { |
1106 |
< |
int c; |
1107 |
< |
return UNSAFE.compareAndSwapInt(this, runStateOffset, |
1108 |
< |
c = runState, c - 1); |
633 |
< |
} |
1105 |
> |
final void runWorker(ForkJoinWorkerThread wt) { |
1106 |
> |
WorkQueue w = wt.workQueue; |
1107 |
> |
w.growArray(false); // Initialize queue array and seed in this thread |
1108 |
> |
w.seed = hashThread(Thread.currentThread()) | (1 << 31); // force < 0 |
1109 |
|
|
1110 |
< |
/** |
636 |
< |
* Advances to at least the given level. Returns true if not |
637 |
< |
* already in at least the given level. |
638 |
< |
*/ |
639 |
< |
private boolean advanceRunLevel(int level) { |
640 |
< |
for (;;) { |
641 |
< |
int s = runState; |
642 |
< |
if ((s & level) != 0) |
643 |
< |
return false; |
644 |
< |
if (UNSAFE.compareAndSwapInt(this, runStateOffset, s, s | level)) |
645 |
< |
return true; |
646 |
< |
} |
1110 |
> |
do {} while (w.runTask(scan(w))); |
1111 |
|
} |
1112 |
|
|
1113 |
< |
// workers array maintenance |
1113 |
> |
// Creating, registering and deregistering workers |
1114 |
|
|
1115 |
|
/** |
1116 |
< |
* Records and returns a workers array index for new worker. |
1116 |
> |
* Tries to create and start a worker |
1117 |
|
*/ |
1118 |
< |
private int recordWorker(ForkJoinWorkerThread w) { |
1119 |
< |
// Try using slot totalCount-1. If not available, scan and/or resize |
1120 |
< |
int k = (workerCounts >>> TOTAL_COUNT_SHIFT) - 1; |
657 |
< |
final ReentrantLock lock = this.workerLock; |
658 |
< |
lock.lock(); |
1118 |
> |
private void addWorker() { |
1119 |
> |
Throwable ex = null; |
1120 |
> |
ForkJoinWorkerThread w = null; |
1121 |
|
try { |
1122 |
< |
ForkJoinWorkerThread[] ws = workers; |
1123 |
< |
int n = ws.length; |
1124 |
< |
if (k < 0 || k >= n || ws[k] != null) { |
663 |
< |
for (k = 0; k < n && ws[k] != null; ++k) |
664 |
< |
; |
665 |
< |
if (k == n) |
666 |
< |
ws = Arrays.copyOf(ws, n << 1); |
1122 |
> |
if ((w = factory.newThread(this)) != null) { |
1123 |
> |
w.start(); |
1124 |
> |
return; |
1125 |
|
} |
1126 |
< |
ws[k] = w; |
1127 |
< |
workers = ws; // volatile array write ensures slot visibility |
670 |
< |
} finally { |
671 |
< |
lock.unlock(); |
1126 |
> |
} catch (Throwable e) { |
1127 |
> |
ex = e; |
1128 |
|
} |
1129 |
< |
return k; |
1129 |
> |
deregisterWorker(w, ex); |
1130 |
> |
} |
1131 |
> |
|
1132 |
> |
/** |
1133 |
> |
* Callback from ForkJoinWorkerThread constructor to assign a |
1134 |
> |
* public name. This must be separate from registerWorker because |
1135 |
> |
* it is called during the "super" constructor call in |
1136 |
> |
* ForkJoinWorkerThread. |
1137 |
> |
*/ |
1138 |
> |
final String nextWorkerName() { |
1139 |
> |
return workerNamePrefix.concat |
1140 |
> |
(Integer.toString(nextWorkerNumber.addAndGet(1))); |
1141 |
|
} |
1142 |
|
|
1143 |
|
/** |
1144 |
< |
* Nulls out record of worker in workers array |
1144 |
> |
* Callback from ForkJoinWorkerThread constructor to establish and |
1145 |
> |
* record its WorkQueue |
1146 |
> |
* |
1147 |
> |
* @param wt the worker thread |
1148 |
|
*/ |
1149 |
< |
private void forgetWorker(ForkJoinWorkerThread w) { |
1150 |
< |
int idx = w.poolIndex; |
1151 |
< |
* Locking helps method recordWorker avoid unnecessary expansion |
682 |
< |
final ReentrantLock lock = this.workerLock; |
1149 |
> |
final void registerWorker(ForkJoinWorkerThread wt) { |
1150 |
> |
WorkQueue w = wt.workQueue; |
1151 |
> |
ReentrantLock lock = this.lock; |
1152 |
|
lock.lock(); |
1153 |
|
try { |
1154 |
< |
ForkJoinWorkerThread[] ws = workers; |
1155 |
< |
if (idx >= 0 && idx < ws.length && ws[idx] == w) // verify |
1156 |
< |
ws[idx] = null; |
1154 |
> |
int k = nextPoolIndex; |
1155 |
> |
WorkQueue[] ws = workQueues; |
1156 |
> |
if (ws != null) { // ignore on shutdown |
1157 |
> |
int n = ws.length; |
1158 |
> |
if (k < 0 || (k & 1) == 0 || k >= n || ws[k] != null) { |
1159 |
> |
for (k = 1; k < n && ws[k] != null; k += 2) |
1160 |
> |
; // workers are at odd indices |
1161 |
> |
if (k >= n) // resize |
1162 |
> |
workQueues = ws = Arrays.copyOf(ws, n << 1); |
1163 |
> |
} |
1164 |
> |
w.poolIndex = k; |
1165 |
> |
w.eventCount = ~(k >>> 1) & SMASK; // Set up wait count |
1166 |
> |
ws[k] = w; // record worker |
1167 |
> |
nextPoolIndex = k + 2; |
1168 |
> |
int rs = runState; |
1169 |
> |
int m = rs & SMASK; // recalculate runState mask |
1170 |
> |
if (k > m) |
1171 |
> |
m = (m << 1) + 1; |
1172 |
> |
runState = (rs & SHUTDOWN) | ((rs + RS_SEQ) & RS_SEQ_MASK) | m; |
1173 |
> |
} |
1174 |
|
} finally { |
1175 |
|
lock.unlock(); |
1176 |
|
} |
1177 |
|
} |
1178 |
|
|
1179 |
|
/** |
1180 |
< |
* Final callback from terminating worker. Removes record of |
1180 |
> |
* Final callback from terminating worker, as well as failure to |
1181 |
> |
* construct or start a worker in addWorker. Removes record of |
1182 |
|
* worker from array, and adjusts counts. If pool is shutting |
1183 |
< |
* down, tries to complete terminatation. |
1183 |
> |
* down, tries to complete termination. |
1184 |
|
* |
1185 |
< |
* @param w the worker |
1185 |
> |
* @param wt the worker thread or null if addWorker failed |
1186 |
> |
* @param ex the exception causing failure, or null if none |
1187 |
|
*/ |
1188 |
< |
final void workerTerminated(ForkJoinWorkerThread w) { |
1189 |
< |
forgetWorker(w); |
1190 |
< |
decrementWorkerCounts(w.isTrimmed()? 0 : ONE_RUNNING, ONE_TOTAL); |
1191 |
< |
while (w.stealCount != 0) // collect final count |
1192 |
< |
tryAccumulateStealCount(w); |
1193 |
< |
tryTerminate(false); |
1194 |
< |
} |
1188 |
> |
final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) { |
1189 |
> |
WorkQueue w = null; |
1190 |
> |
if (wt != null && (w = wt.workQueue) != null) { |
1191 |
> |
w.runState = -1; // ensure runState is set |
1192 |
> |
stealCount.getAndAdd(w.totalSteals + w.nsteals); |
1193 |
> |
int idx = w.poolIndex; |
1194 |
> |
ReentrantLock lock = this.lock; |
1195 |
> |
lock.lock(); |
1196 |
> |
try { // remove record from array |
1197 |
> |
WorkQueue[] ws = workQueues; |
1198 |
> |
if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w) |
1199 |
> |
ws[nextPoolIndex = idx] = null; |
1200 |
> |
} finally { |
1201 |
> |
lock.unlock(); |
1202 |
> |
} |
1203 |
> |
} |
1204 |
|
|
1205 |
< |
// Waiting for and signalling events |
1205 |
> |
long c; // adjust ctl counts |
1206 |
> |
do {} while (!U.compareAndSwapLong |
1207 |
> |
(this, CTL, c = ctl, (((c - AC_UNIT) & AC_MASK) | |
1208 |
> |
((c - TC_UNIT) & TC_MASK) | |
1209 |
> |
(c & ~(AC_MASK|TC_MASK))))); |
1210 |
|
|
1211 |
< |
/** |
1212 |
< |
* Releases workers blocked on a count not equal to current count. |
1213 |
< |
* Normally called after precheck that eventWaiters isn't zero to |
1214 |
< |
* avoid wasted array checks. Gives up upon a change in count or |
714 |
< |
* upon releasing two workers, letting others take over. |
715 |
< |
*/ |
716 |
< |
private void releaseEventWaiters() { |
717 |
< |
ForkJoinWorkerThread[] ws = workers; |
718 |
< |
int n = ws.length; |
719 |
< |
long h = eventWaiters; |
720 |
< |
int ec = eventCount; |
721 |
< |
boolean releasedOne = false; |
722 |
< |
ForkJoinWorkerThread w; int id; |
723 |
< |
while ((id = ((int)(h & WAITER_ID_MASK)) - 1) >= 0 && |
724 |
< |
(int)(h >>> EVENT_COUNT_SHIFT) != ec && |
725 |
< |
id < n && (w = ws[id]) != null) { |
726 |
< |
if (UNSAFE.compareAndSwapLong(this, eventWaitersOffset, |
727 |
< |
h, w.nextWaiter)) { |
728 |
< |
LockSupport.unpark(w); |
729 |
< |
if (releasedOne) // exit on second release |
730 |
< |
break; |
731 |
< |
releasedOne = true; |
732 |
< |
} |
733 |
< |
if (eventCount != ec) |
734 |
< |
break; |
735 |
< |
h = eventWaiters; |
1211 |
> |
if (!tryTerminate(false) && w != null) { |
1212 |
> |
w.cancelAll(); // cancel remaining tasks |
1213 |
> |
if (w.array != null) // suppress signal if never ran |
1214 |
> |
signalWork(); // wake up or create replacement |
1215 |
|
} |
1216 |
+ |
|
1217 |
+ |
if (ex != null) // rethrow |
1218 |
+ |
U.throwException(ex); |
1219 |
|
} |
1220 |
|
|
1221 |
+ |
|
1222 |
+ |
// Maintaining ctl counts |
1223 |
+ |
|
1224 |
|
/** |
1225 |
< |
* Tries to advance eventCount and releases waiters. Called only |
741 |
< |
* from workers. |
1225 |
> |
* Increments active count; mainly called upon return from blocking |
1226 |
|
*/ |
1227 |
< |
final void signalWork() { |
1228 |
< |
int c; // try to increment event count -- CAS failure OK |
1229 |
< |
UNSAFE.compareAndSwapInt(this, eventCountOffset, c = eventCount, c+1); |
746 |
< |
if (eventWaiters != 0L) |
747 |
< |
releaseEventWaiters(); |
1227 |
> |
final void incrementActiveCount() { |
1228 |
> |
long c; |
1229 |
> |
do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT)); |
1230 |
|
} |
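Packing related counts into one long lets all of them be adjusted in a single CAS, as in the deregisterWorker and incrementActiveCount updates above. A sketch with assumed 16-bit fields (the real AC/TC shifts and masks are defined elsewhere in this file and may differ):

    final class PackedCountsDemo {
        static final int  AC_SHIFT = 48, TC_SHIFT = 32;          // assumed layout
        static final long AC_UNIT  = 1L << AC_SHIFT, TC_UNIT = 1L << TC_SHIFT;
        static final long AC_MASK  = 0xffffL << AC_SHIFT, TC_MASK = 0xffffL << TC_SHIFT;

        static long addWorker(long c) {   // +1 active, +1 total, other bits unchanged
            return ((c + AC_UNIT) & AC_MASK) | ((c + TC_UNIT) & TC_MASK)
                 | (c & ~(AC_MASK | TC_MASK));
        }

        public static void main(String[] args) {
            long c = addWorker(addWorker(0L));
            System.out.println((short) (c >>> AC_SHIFT)); // 2 active
            System.out.println((short) (c >>> TC_SHIFT)); // 2 total
        }
    }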
1231 |
|
|
1232 |
|
/** |
1233 |
< |
* Adds the given worker to event queue and blocks until |
1234 |
< |
* terminating or event count advances from the given value |
1235 |
< |
* |
1236 |
< |
* @param w the calling worker thread |
1237 |
< |
* @param ec the count |
1238 |
< |
*/ |
1239 |
< |
private void eventSync(ForkJoinWorkerThread w, int ec) { |
1240 |
< |
long nh = (((long)ec) << EVENT_COUNT_SHIFT) | ((long)(w.poolIndex+1)); |
1241 |
< |
long h; |
1242 |
< |
while ((runState < SHUTDOWN || !tryTerminate(false)) && |
1243 |
< |
(((int)((h = eventWaiters) & WAITER_ID_MASK)) == 0 || |
1244 |
< |
(int)(h >>> EVENT_COUNT_SHIFT) == ec) && |
1245 |
< |
eventCount == ec) { |
1246 |
< |
if (UNSAFE.compareAndSwapLong(this, eventWaitersOffset, |
1247 |
< |
w.nextWaiter = h, nh)) { |
1248 |
< |
awaitEvent(w, ec); |
1249 |
< |
break; |
1233 |
> |
* Activates or creates a worker |
1234 |
> |
*/ |
1235 |
> |
final void signalWork() { |
1236 |
> |
/* |
1237 |
> |
* The while condition is true if: (there are too few total |
1238 |
> |
* workers OR there is at least one waiter) AND (there are too |
1239 |
> |
* few active workers OR the pool is terminating). The value |
1240 |
> |
* of e distinguishes the remaining cases: zero (no waiters) |
1241 |
> |
* for create, negative if terminating (in which case do |
1242 |
> |
* nothing), else release a waiter. The secondary checks for |
1243 |
> |
* release (non-null array etc) can fail if the pool begins |
1244 |
> |
* terminating after the test, and don't impose any added cost |
1245 |
> |
* because JVMs must perform null and bounds checks anyway. |
1246 |
> |
*/ |
1247 |
> |
long c; int e, u; |
1248 |
> |
while ((((e = (int)(c = ctl)) | (u = (int)(c >>> 32))) & |
1249 |
> |
(INT_SIGN|SHORT_SIGN)) == (INT_SIGN|SHORT_SIGN)) { |
1250 |
> |
WorkQueue[] ws = workQueues; int i; WorkQueue w; Thread p; |
1251 |
> |
if (e == 0) { // add a new worker |
1252 |
> |
if (U.compareAndSwapLong |
1253 |
> |
(this, CTL, c, (long)(((u + UTC_UNIT) & UTC_MASK) | |
1254 |
> |
((u + UAC_UNIT) & UAC_MASK)) << 32)) { |
1255 |
> |
addWorker(); |
1256 |
> |
break; |
1257 |
> |
} |
1258 |
> |
} |
1259 |
> |
else if (e > 0 && ws != null && |
1260 |
> |
(i = ((~e << 1) | 1) & SMASK) < ws.length && |
1261 |
> |
(w = ws[i]) != null && |
1262 |
> |
w.eventCount == (e | INT_SIGN)) { |
1263 |
> |
if (U.compareAndSwapLong |
1264 |
> |
(this, CTL, c, (((long)(w.nextWait & E_MASK)) | |
1265 |
> |
((long)(u + UAC_UNIT) << 32)))) { |
1266 |
> |
w.eventCount = (e + E_SEQ) & E_MASK; |
1267 |
> |
if ((p = w.parker) != null) |
1268 |
> |
U.unpark(p); // release a waiting worker |
1269 |
> |
break; |
1270 |
> |
} |
1271 |
|
} |
1272 |
+ |
else |
1273 |
+ |
break; |
1274 |
|
} |
1275 |
|
} |
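The release path pops a Treiber stack threaded through the workers: ctl's low bits identify the most recent waiter and each waiter's nextWait records its predecessor. The bare structure, minus the event-count sequence bits that guard against ABA (IdleWorkerStack is hypothetical):

    import java.util.concurrent.atomic.AtomicInteger;

    final class IdleWorkerStack {
        private final AtomicInteger top = new AtomicInteger(); // 0 = empty; ids 1-based
        private final int[] nextWait;

        IdleWorkerStack(int maxWorkers) { nextWait = new int[maxWorkers + 1]; }

        void push(int id) {                        // worker inactivates itself
            int h;
            do { nextWait[id] = h = top.get(); }
            while (!top.compareAndSet(h, id));
        }

        int pop() {                                // signal: release newest waiter
            int h;
            do { if ((h = top.get()) == 0) return 0; }
            while (!top.compareAndSet(h, nextWait[h]));
            return h;
        }
    }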
1276 |
|
|
1277 |
|
/** |
1278 |
< |
* Blocks the given worker (that has already been entered as an |
1279 |
< |
* event waiter) until terminating or event count advances from |
1280 |
< |
* the given value. The oldest (first) waiter uses a timed wait to |
1281 |
< |
* occasionally one-by-one shrink the number of workers (to a |
1282 |
< |
* minimum of one) if the pool has not been used for extended |
1283 |
< |
* periods. |
1284 |
< |
* |
1285 |
< |
* @param w the calling worker thread |
1286 |
< |
* @param ec the count |
1287 |
< |
*/ |
1288 |
< |
private void awaitEvent(ForkJoinWorkerThread w, int ec) { |
1289 |
< |
while (eventCount == ec) { |
1290 |
< |
if (tryAccumulateStealCount(w)) { // transfer while idle |
1291 |
< |
boolean untimed = (w.nextWaiter != 0L || |
1292 |
< |
(workerCounts & RUNNING_COUNT_MASK) <= 1); |
1293 |
< |
long startTime = untimed? 0 : System.nanoTime(); |
1294 |
< |
Thread.interrupted(); // clear/ignore interrupt |
1295 |
< |
if (eventCount != ec || w.runState != 0 || |
1296 |
< |
runState >= TERMINATING) // recheck after clear |
1297 |
< |
break; |
1298 |
< |
if (untimed) |
1299 |
< |
LockSupport.park(w); |
1300 |
< |
else { |
1301 |
< |
LockSupport.parkNanos(w, SHRINK_RATE_NANOS); |
1302 |
< |
if (eventCount != ec || w.runState != 0 || |
1303 |
< |
runState >= TERMINATING) |
1304 |
< |
break; |
1305 |
< |
if (System.nanoTime() - startTime >= SHRINK_RATE_NANOS) |
1306 |
< |
tryShutdownUnusedWorker(ec); |
1278 |
> |
* Tries to decrement active count (sometimes implicitly) and |
1279 |
> |
* possibly release or create a compensating worker in preparation |
1280 |
> |
* for blocking. Fails on contention or termination. |
1281 |
> |
* |
1282 |
> |
* @return true if the caller can block, else should recheck and retry |
1283 |
> |
*/ |
1284 |
> |
final boolean tryCompensate() { |
1285 |
> |
WorkQueue[] ws; WorkQueue w; Thread p; |
1286 |
> |
int pc = parallelism, e, u, ac, tc, i; |
1287 |
> |
long c = ctl; |
1288 |
> |
|
1289 |
> |
if ((e = (int)c) >= 0) { |
1290 |
> |
if ((ac = ((u = (int)(c >>> 32)) >> UAC_SHIFT)) <= 0 && |
1291 |
> |
e != 0 && (ws = workQueues) != null && |
1292 |
> |
(i = ((~e << 1) | 1) & SMASK) < ws.length && |
1293 |
> |
(w = ws[i]) != null) { |
1294 |
> |
if (w.eventCount == (e | INT_SIGN) && |
1295 |
> |
U.compareAndSwapLong |
1296 |
> |
(this, CTL, c, ((long)(w.nextWait & E_MASK) | |
1297 |
> |
(c & (AC_MASK|TC_MASK))))) { |
1298 |
> |
w.eventCount = (e + E_SEQ) & E_MASK; |
1299 |
> |
if ((p = w.parker) != null) |
1300 |
> |
U.unpark(p); |
1301 |
> |
return true; // release an idle worker |
1302 |
> |
} |
1303 |
> |
} |
1304 |
> |
else if ((tc = (short)(u >>> UTC_SHIFT)) >= 0 && ac + pc > 1) { |
1305 |
> |
long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK); |
1306 |
> |
if (U.compareAndSwapLong(this, CTL, c, nc)) |
1307 |
> |
return true; // no compensation needed |
1308 |
> |
} |
1309 |
> |
else if (tc + pc < MAX_ID) { |
1310 |
> |
long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK); |
1311 |
> |
if (U.compareAndSwapLong(this, CTL, c, nc)) { |
1312 |
> |
addWorker(); |
1313 |
> |
return true; // create replacement |
1314 |
|
} |
1315 |
|
} |
1316 |
|
} |
1317 |
+ |
return false; |
1318 |
|
} |
1319 |
|
|
1320 |
< |
// Maintaining parallelism |
808 |
< |
|
809 |
< |
/** |
810 |
< |
* Pushes worker onto the spare stack |
811 |
< |
*/ |
812 |
< |
final void pushSpare(ForkJoinWorkerThread w) { |
813 |
< |
int ns = (++w.spareCount << SPARE_COUNT_SHIFT) | (w.poolIndex + 1); |
814 |
< |
do {} while (!UNSAFE.compareAndSwapInt(this, spareWaitersOffset, |
815 |
< |
w.nextSpare = spareWaiters,ns)); |
816 |
< |
} |
1320 |
> |
// Submissions |
1321 |
|
|
1322 |
|
/** |
1323 |
< |
* Tries (once) to resume a spare if the number of running |
1324 |
< |
* threads is less than target. |
1323 |
> |
* Unless shutting down, adds the given task to some submission |
1324 |
> |
* queue; using a randomly chosen queue index if the caller is a |
1325 |
> |
* ForkJoinWorkerThread, else one based on caller thread's hash |
1326 |
> |
* code. If no queue exists at the index, one is created. If the |
1327 |
> |
* queue is busy, another is chosen by sweeping through the queues |
1328 |
> |
* array. |
1329 |
|
*/ |
1330 |
< |
private void tryResumeSpare() { |
1331 |
< |
int sw, id; |
1332 |
< |
ForkJoinWorkerThread[] ws = workers; |
1333 |
< |
int n = ws.length; |
1334 |
< |
ForkJoinWorkerThread w; |
1335 |
< |
if ((sw = spareWaiters) != 0 && |
1336 |
< |
(id = (sw & SPARE_ID_MASK) - 1) >= 0 && |
1337 |
< |
id < n && (w = ws[id]) != null && |
1338 |
< |
(workerCounts & RUNNING_COUNT_MASK) < parallelism && |
1339 |
< |
spareWaiters == sw && |
1340 |
< |
UNSAFE.compareAndSwapInt(this, spareWaitersOffset, |
1341 |
< |
sw, w.nextSpare)) { |
1342 |
< |
int c; // increment running count before resume |
1343 |
< |
do {} while(!UNSAFE.compareAndSwapInt |
1344 |
< |
(this, workerCountsOffset, |
1345 |
< |
c = workerCounts, c + ONE_RUNNING)); |
1346 |
< |
if (w.tryUnsuspend()) |
1347 |
< |
LockSupport.unpark(w); |
1348 |
< |
else // back out if w was shutdown |
1349 |
< |
decrementWorkerCounts(ONE_RUNNING, 0); |
1330 |
> |
private void doSubmit(ForkJoinTask<?> task) { |
1331 |
> |
if (task == null) |
1332 |
> |
throw new NullPointerException(); |
1333 |
> |
Thread t = Thread.currentThread(); |
1334 |
> |
int r = ((t instanceof ForkJoinWorkerThread) ? |
1335 |
> |
((ForkJoinWorkerThread)t).workQueue.nextSeed() : hashThread(t)); |
1336 |
> |
for (;;) { |
1337 |
> |
int rs = runState, m = rs & SMASK; |
1338 |
> |
int j = r &= (m & ~1); // even numbered queues |
1339 |
> |
WorkQueue[] ws = workQueues; |
1340 |
> |
if (rs < 0 || ws == null) |
1341 |
> |
throw new RejectedExecutionException(); // shutting down |
1342 |
> |
if (ws.length > m) { // consistency check |
1343 |
> |
for (WorkQueue q;;) { // circular sweep |
1344 |
> |
if (((q = ws[j]) != null || |
1345 |
> |
(q = tryAddSharedQueue(j)) != null) && |
1346 |
> |
q.trySharedPush(task)) { |
1347 |
> |
signalWork(); |
1348 |
> |
return; |
1349 |
> |
} |
1350 |
> |
if ((j = (j + 2) & m) == r) { |
1351 |
> |
Thread.yield(); // all queues busy |
1352 |
> |
break; |
1353 |
> |
} |
1354 |
> |
} |
1355 |
> |
} |
1356 |
|
} |
1357 |
|
} |
1358 |
|
|
1359 |
|
/** |
1360 |
< |
* Tries to increase the number of running workers if below target |
1361 |
< |
* parallelism: If a spare exists tries to resume it via |
1362 |
< |
* tryResumeSpare. Otherwise, if not enough total workers or all |
1363 |
< |
* existing workers are busy, adds a new worker. In all cases also |
1364 |
< |
* helps wake up releasable workers waiting for work. |
1365 |
< |
*/ |
1366 |
< |
private void helpMaintainParallelism() { |
1367 |
< |
int pc = parallelism; |
1368 |
< |
int wc, rs, tc; |
1369 |
< |
while (((wc = workerCounts) & RUNNING_COUNT_MASK) < pc && |
1370 |
< |
(rs = runState) < TERMINATING) { |
1371 |
< |
if (spareWaiters != 0) |
1372 |
< |
tryResumeSpare(); |
859 |
< |
else if ((tc = wc >>> TOTAL_COUNT_SHIFT) >= MAX_WORKERS || |
860 |
< |
(tc >= pc && (rs & ACTIVE_COUNT_MASK) != tc)) |
861 |
< |
break; // enough total |
862 |
< |
else if (runState == rs && workerCounts == wc && |
863 |
< |
UNSAFE.compareAndSwapInt(this, workerCountsOffset, wc, |
864 |
< |
wc + (ONE_RUNNING|ONE_TOTAL))) { |
865 |
< |
ForkJoinWorkerThread w = null; |
1360 |
> |
* Tries to add and register a new queue at the given index. |
1361 |
> |
* |
1362 |
> |
* @param idx the workQueues array index to register the queue |
1363 |
> |
* @return the queue, or null if could not add because could |
1364 |
> |
* not acquire lock or idx is unusable |
1365 |
> |
*/ |
1366 |
> |
private WorkQueue tryAddSharedQueue(int idx) { |
1367 |
> |
WorkQueue q = null; |
1368 |
> |
ReentrantLock lock = this.lock; |
1369 |
> |
if (idx >= 0 && (idx & 1) == 0 && !lock.isLocked()) { |
1370 |
> |
// create queue outside of lock but only if apparently free |
1371 |
> |
WorkQueue nq = new WorkQueue(null, SHARED_QUEUE); |
1372 |
> |
if (lock.tryLock()) { |
1373 |
|
try { |
1374 |
< |
w = factory.newThread(this); |
1375 |
< |
} finally { // adjust on null or exceptional factory return |
1376 |
< |
if (w == null) { |
1377 |
< |
decrementWorkerCounts(ONE_RUNNING, ONE_TOTAL); |
1378 |
< |
tryTerminate(false); // handle failure during shutdown |
1374 |
> |
WorkQueue[] ws = workQueues; |
1375 |
> |
if (ws != null && idx < ws.length) { |
1376 |
> |
if ((q = ws[idx]) == null) { |
1377 |
> |
int rs; // update runState seq |
1378 |
> |
ws[idx] = q = nq; |
1379 |
> |
runState = (((rs = runState) & SHUTDOWN) | |
1380 |
> |
((rs + RS_SEQ) & ~SHUTDOWN)); |
1381 |
> |
} |
1382 |
|
} |
1383 |
< |
} |
1384 |
< |
if (w == null) |
875 |
< |
break; |
876 |
< |
w.start(recordWorker(w), ueh); |
877 |
< |
if ((workerCounts >>> TOTAL_COUNT_SHIFT) >= pc) { |
878 |
< |
int c; // advance event count |
879 |
< |
UNSAFE.compareAndSwapInt(this, eventCountOffset, |
880 |
< |
c = eventCount, c+1); |
881 |
< |
break; // add at most one unless total below target |
1383 |
> |
} finally { |
1384 |
> |
lock.unlock(); |
1385 |
|
} |
1386 |
|
} |
1387 |
|
} |
1388 |
< |
if (eventWaiters != 0L) |
886 |
< |
releaseEventWaiters(); |
1388 |
> |
return q; |
1389 |
|
} |
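From a client's point of view this machinery is invisible: an external (non-worker) submission simply lands in one of the even-indexed shared queues. A minimal usage sketch, written against the java.util.concurrent copy of the API (the jsr166y package behaves the same way):

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.RecursiveTask;

    public class SubmitDemo {
        public static void main(String[] args) {
            ForkJoinPool pool = new ForkJoinPool();     // default parallelism
            RecursiveTask<Integer> task = new RecursiveTask<Integer>() {
                protected Integer compute() { return 21 + 21; }
            };
            System.out.println(pool.invoke(task));      // 42
            pool.shutdown();
        }
    }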
1390 |
|
|
1391 |
< |
/** |
890 |
< |
* Callback from the oldest waiter in awaitEvent waking up after a |
891 |
< |
* period of non-use. If all workers are idle, tries (once) to |
892 |
< |
* shutdown an event waiter or a spare, if one exists. Note that |
893 |
< |
* we don't need CAS or locks here because the method is called |
894 |
< |
* only from one thread occasionally waking (and even misfires are |
895 |
< |
* OK). Note that until the shutdown worker fully terminates, |
896 |
< |
* workerCounts will overestimate total count, which is tolerable. |
897 |
< |
* |
898 |
< |
* @param ec the event count waited on by caller (to abort |
899 |
< |
* attempt if count has since changed). |
900 |
< |
*/ |
901 |
< |
private void tryShutdownUnusedWorker(int ec) { |
902 |
< |
if (runState == 0 && eventCount == ec) { // only trigger if all idle |
903 |
< |
ForkJoinWorkerThread[] ws = workers; |
904 |
< |
int n = ws.length; |
905 |
< |
ForkJoinWorkerThread w = null; |
906 |
< |
boolean shutdown = false; |
907 |
< |
int sw; |
908 |
< |
long h; |
909 |
< |
if ((sw = spareWaiters) != 0) { // prefer killing spares |
910 |
< |
int id = (sw & SPARE_ID_MASK) - 1; |
911 |
< |
if (id >= 0 && id < n && (w = ws[id]) != null && |
912 |
< |
UNSAFE.compareAndSwapInt(this, spareWaitersOffset, |
913 |
< |
sw, w.nextSpare)) |
914 |
< |
shutdown = true; |
915 |
< |
} |
916 |
< |
else if ((h = eventWaiters) != 0L) { |
917 |
< |
long nh; |
918 |
< |
int id = ((int)(h & WAITER_ID_MASK)) - 1; |
919 |
< |
if (id >= 0 && id < n && (w = ws[id]) != null && |
920 |
< |
(nh = w.nextWaiter) != 0L && // keep at least one worker |
921 |
< |
UNSAFE.compareAndSwapLong(this, eventWaitersOffset, h, nh)) |
922 |
< |
shutdown = true; |
923 |
< |
} |
924 |
< |
if (w != null && shutdown) { |
925 |
< |
w.shutdown(); |
926 |
< |
LockSupport.unpark(w); |
927 |
< |
} |
928 |
< |
} |
929 |
< |
releaseEventWaiters(); // in case of interference |
930 |
< |
} |
1391 |
> |
// Scanning for tasks |
1392 |
|
|
1393 |
|
/** |
1394 |
< |
* Callback from workers invoked upon each top-level action (i.e., |
1395 |
< |
* stealing a task or taking a submission and running it). |
1396 |
< |
* Performs one or more of the following: |
1397 |
< |
* |
1398 |
< |
* 1. If the worker is active and either did not run a task |
1399 |
< |
* or there are too many workers, try to set its active status |
1400 |
< |
* to inactive and update activeCount. On contention, we may |
1401 |
< |
* try again in this or a subsequent call. |
1402 |
< |
* |
1403 |
< |
* 2. If not enough total workers, help create some. |
1404 |
< |
* |
1405 |
< |
* 3. If there are too many running workers, suspend this worker |
1406 |
< |
* (first forcing inactive if necessary). If it is not needed, |
1407 |
< |
* it may be shutdown while suspended (via |
1408 |
< |
* tryShutdownUnusedWorker). Otherwise, upon resume it |
1409 |
< |
* rechecks running thread count and need for event sync. |
1410 |
< |
* |
1411 |
< |
* 4. If worker did not run a task, await the next task event via |
1412 |
< |
* eventSync if necessary (first forcing inactivation), upon |
1413 |
< |
* which the worker may be shutdown via |
1414 |
< |
* tryShutdownUnusedWorker. Otherwise, help release any |
1415 |
< |
* existing event waiters that are now releasable, |
1416 |
< |
* |
1417 |
< |
* @param w the worker |
1418 |
< |
* @param ran true if worker ran a task since last call to this method |
1419 |
< |
*/ |
1420 |
< |
final void preStep(ForkJoinWorkerThread w, boolean ran) { |
1421 |
< |
int wec = w.lastEventCount; |
1422 |
< |
boolean active = w.active; |
1423 |
< |
boolean inactivate = false; |
1424 |
< |
int pc = parallelism; |
1425 |
< |
int rs; |
1426 |
< |
while (w.runState == 0 && (rs = runState) < TERMINATING) { |
1427 |
< |
if ((inactivate || (active && (rs & ACTIVE_COUNT_MASK) >= pc)) && |
1428 |
< |
UNSAFE.compareAndSwapInt(this, runStateOffset, rs, rs - 1)) |
1429 |
< |
inactivate = active = w.active = false; |
1430 |
< |
int wc = workerCounts; |
1431 |
< |
if ((wc & RUNNING_COUNT_MASK) > pc) { |
1432 |
< |
if (!(inactivate |= active) && // must inactivate to suspend |
1433 |
< |
workerCounts == wc && // try to suspend as spare |
1434 |
< |
UNSAFE.compareAndSwapInt(this, workerCountsOffset, |
1435 |
< |
wc, wc - ONE_RUNNING)) |
1436 |
< |
w.suspendAsSpare(); |
1437 |
< |
} |
1438 |
< |
else if ((wc >>> TOTAL_COUNT_SHIFT) < pc) |
1439 |
< |
helpMaintainParallelism(); // not enough workers |
1440 |
< |
else if (!ran) { |
1441 |
< |
long h = eventWaiters; |
1442 |
< |
int ec = eventCount; |
1443 |
< |
if (h != 0L && (int)(h >>> EVENT_COUNT_SHIFT) != ec) |
1444 |
< |
releaseEventWaiters(); // release others before waiting |
1445 |
< |
else if (ec != wec) { |
1446 |
< |
w.lastEventCount = ec; // no need to wait |
1394 |
> |
* Scans for and, if found, returns one task, else possibly |
1395 |
> |
* inactivates the worker. This method operates on single reads of |
1396 |
> |
* volatile state and is designed to be re-invoked continuously in |
1397 |
> |
* part because it returns upon detecting inconsistencies, |
1398 |
> |
* contention, or state changes that indicate possible success on |
1399 |
> |
* re-invocation. |
1400 |
> |
* |
1401 |
> |
* The scan searches for tasks across queues, randomly selecting |
1402 |
> |
* the first #queues probes, favoring steals 2:1 over submissions |
1403 |
> |
* (by exploiting even/odd indexing), and then performing a |
1404 |
> |
* circular sweep of all queues. The scan terminates upon either |
1405 |
> |
* finding a non-empty queue, or completing a full sweep. If the |
1406 |
> |
* worker is not inactivated, it takes and returns a task from |
1407 |
> |
* this queue. On failure to find a task, we take one of the |
1408 |
> |
* following actions, after which the caller will retry calling |
1409 |
> |
* this method unless terminated. |
1410 |
> |
* |
1411 |
> |
* * If not a complete sweep, try to release a waiting worker. If |
1412 |
> |
* the scan terminated because the worker is inactivated, then the |
1413 |
> |
* released worker will often be the calling worker, and it can |
1414 |
> |
* succeed in obtaining a task on the next call. Or maybe it is |
1415 |
> |
* another worker, but with same net effect. Releasing in other |
1416 |
> |
* cases as well ensures that we have enough workers running. |
1417 |
> |
* |
1418 |
> |
* If the caller has run a task since the last empty scan, |
1419 |
> |
* return (to allow rescan) if other workers are not also yet |
1420 |
> |
* enqueued. Field WorkQueue.rescans counts down on each scan to |
1421 |
> |
* ensure eventual inactivation, and occasional calls to |
1422 |
> |
* Thread.yield to help avoid interference with more useful |
1423 |
> |
* activities on the system. |
1424 |
> |
* |
1425 |
> |
* * If pool is terminating, terminate the worker |
1426 |
> |
* |
1427 |
> |
* * If not already enqueued, try to inactivate and enqueue the |
1428 |
> |
* worker on wait queue. |
1429 |
> |
* |
1430 |
> |
* * If already enqueued and none of the above apply, either park |
1431 |
> |
* awaiting signal, or if this is the most recent waiter and pool |
1432 |
> |
* is quiescent, relay to idleAwaitWork to check for termination |
1433 |
> |
* and possibly shrink pool. |
1434 |
> |
* |
1435 |
> |
* @param w the worker (via its WorkQueue) |
1436 |
> |
* @return a task, or null if none found |
1437 |
> |
*/ |
1438 |
> |
private final ForkJoinTask<?> scan(WorkQueue w) { |
1439 |
> |
boolean swept = false; // true after full empty scan |
1440 |
> |
WorkQueue[] ws; // volatile read order matters |
1441 |
> |
int r = w.seed, ec = w.eventCount; // ec is negative if inactive |
1442 |
> |
int rs = runState, m = rs & SMASK; |
1443 |
> |
if ((ws = workQueues) != null && ws.length > m) { |
1444 |
> |
ForkJoinTask<?> task = null; |
1445 |
> |
for (int k = 0, j = -2 - m; ; ++j) { |
1446 |
> |
WorkQueue q; int b; |
1447 |
> |
if (j < 0) { // random probes while j negative |
1448 |
> |
r ^= r << 13; r ^= r >>> 17; k = (r ^= r << 5) | (j & 1); |
1449 |
> |
} // worker (not submit) for odd j |
1450 |
> |
else // cyclic scan when j >= 0 |
1451 |
> |
k += (m >>> 1) | 1; // step by half to reduce bias |
1452 |
> |
|
1453 |
> |
if ((q = ws[k & m]) != null && (b = q.base) - q.top < 0) { |
1454 |
> |
if (ec >= 0) |
1455 |
> |
task = q.pollAt(b); // steal |
1456 |
> |
break; |
1457 |
> |
} |
1458 |
> |
else if (j > m) { |
1459 |
> |
if (rs == runState) // staleness check |
1460 |
> |
swept = true; |
1461 |
|
break; |
1462 |
|
} |
988 |
– |
else if (!(inactivate |= active)) |
989 |
– |
eventSync(w, wec); // must inactivate before sync |
1463 |
|
} |
1464 |
< |
else |
1465 |
< |
break; |
1464 |
> |
w.seed = r; // save seed for next scan |
1465 |
> |
if (task != null) |
1466 |
> |
return task; |
1467 |
> |
} |
1468 |
> |
|
1469 |
> |
// Decode ctl on empty scan |
1470 |
> |
long c = ctl; int e = (int)c, a = (int)(c >> AC_SHIFT), nr, ns; |
1471 |
> |
if (!swept) { // try to release a waiter |
1472 |
> |
WorkQueue v; Thread p; |
1473 |
> |
if (e > 0 && a < 0 && ws != null && |
1474 |
> |
(v = ws[((~e << 1) | 1) & m]) != null && |
1475 |
> |
v.eventCount == (e | INT_SIGN) && U.compareAndSwapLong |
1476 |
> |
(this, CTL, c, ((long)(v.nextWait & E_MASK) | |
1477 |
> |
((c + AC_UNIT) & (AC_MASK|TC_MASK))))) { |
1478 |
> |
v.eventCount = (e + E_SEQ) & E_MASK; |
1479 |
> |
if ((p = v.parker) != null) |
1480 |
> |
U.unpark(p); |
1481 |
> |
} |
1482 |
> |
} |
1483 |
> |
else if ((nr = w.rescans) > 0) { // continue rescanning |
1484 |
> |
int ac = a + parallelism; |
1485 |
> |
if ((w.rescans = (ac < nr) ? ac : nr - 1) > 0 && w.seed < 0 && |
1486 |
> |
w.eventCount == ec) |
1487 |
> |
Thread.yield(); // 1 bit randomness for yield call |
1488 |
> |
} |
1489 |
> |
else if (e < 0) // pool is terminating |
1490 |
> |
w.runState = -1; |
1491 |
> |
else if (ec >= 0) { // try to enqueue |
1492 |
> |
long nc = (long)ec | ((c - AC_UNIT) & (AC_MASK|TC_MASK)); |
1493 |
> |
w.nextWait = e; |
1494 |
> |
w.eventCount = ec | INT_SIGN; // mark as inactive |
1495 |
> |
if (!U.compareAndSwapLong(this, CTL, c, nc)) |
1496 |
> |
w.eventCount = ec; // back out on CAS failure |
1497 |
> |
else if ((ns = w.nsteals) != 0) { // set rescans if ran task |
1498 |
> |
if (a <= 0) // ... unless too many active |
1499 |
> |
w.rescans = a + parallelism; |
1500 |
> |
w.nsteals = 0; |
1501 |
> |
w.totalSteals += ns; |
1502 |
> |
} |
1503 |
> |
} |
1504 |
> |
else { // already queued |
1505 |
> |
if (parallelism == -a) |
1506 |
> |
idleAwaitWork(w); // quiescent |
1507 |
> |
if (w.eventCount == ec) { |
1508 |
> |
Thread.interrupted(); // clear status |
1509 |
> |
ForkJoinWorkerThread wt = w.owner; |
1510 |
> |
U.putObject(wt, PARKBLOCKER, this); |
1511 |
> |
w.parker = wt; // emulate LockSupport.park |
1512 |
> |
if (w.eventCount == ec) // recheck |
1513 |
> |
U.park(false, 0L); // block |
1514 |
> |
w.parker = null; |
1515 |
> |
U.putObject(wt, PARKBLOCKER, null); |
1516 |
> |
} |
1517 |
|
} |
1518 |
+ |
return null; |
1519 |
|
} |
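The sweep stride (m >>> 1) | 1 is odd, hence coprime to the power-of-two table size, so a full sweep of m + 1 steps visits every slot exactly once. A quick demonstration:

    public class SweepDemo {
        public static void main(String[] args) {
            int m = 7;                       // mask for a table of 8 queues
            int step = (m >>> 1) | 1;        // 3: odd, so coprime to 8
            int k = 5;                       // arbitrary starting slot
            for (int j = 0; j <= m; ++j, k += step)
                System.out.print((k & m) + " ");  // each of 0..7 printed once
        }
    }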
1520 |
|
|
1521 |
|
/** |
1522 |
< |
* Helps and/or blocks awaiting join of the given task. |
1523 |
< |
* See above for explanation. |
1524 |
< |
* |
1525 |
< |
* @param joinMe the task to join |
1526 |
< |
* @param worker the current worker thread |
1527 |
< |
*/ |
1528 |
< |
final void awaitJoin(ForkJoinTask<?> joinMe, ForkJoinWorkerThread worker) { |
1529 |
< |
int retries = 2 + (parallelism >> 2); // #helpJoins before blocking |
1530 |
< |
while (joinMe.status >= 0) { |
1531 |
< |
int wc; |
1532 |
< |
worker.helpJoinTask(joinMe); |
1533 |
< |
if (joinMe.status < 0) |
1534 |
< |
break; |
1535 |
< |
else if (retries > 0) |
1536 |
< |
--retries; |
1537 |
< |
else if (((wc = workerCounts) & RUNNING_COUNT_MASK) != 0 && |
1538 |
< |
UNSAFE.compareAndSwapInt(this, workerCountsOffset, |
1539 |
< |
wc, wc - ONE_RUNNING)) { |
1540 |
< |
int stat, c; long h; |
1541 |
< |
while ((stat = joinMe.status) >= 0 && |
1542 |
< |
(h = eventWaiters) != 0L && // help release others |
1543 |
< |
(int)(h >>> EVENT_COUNT_SHIFT) != eventCount) |
1544 |
< |
releaseEventWaiters(); |
1545 |
< |
if (stat >= 0 && |
1546 |
< |
((workerCounts & RUNNING_COUNT_MASK) == 0 || |
1547 |
< |
(stat = |
1548 |
< |
joinMe.internalAwaitDone(JOIN_TIMEOUT_MILLIS)) >= 0)) |
1549 |
< |
helpMaintainParallelism(); // timeout or no running workers |
1550 |
< |
do {} while (!UNSAFE.compareAndSwapInt |
1551 |
< |
(this, workerCountsOffset, |
1552 |
< |
c = workerCounts, c + ONE_RUNNING)); |
1553 |
< |
if (stat < 0) |
1554 |
< |
break; // else restart |
1522 |
> |
* If inactivating worker w has caused pool to become quiescent, |
1523 |
> |
* check for pool termination, and, so long as this is not the |
1524 |
> |
* only worker, wait for an event for up to SHRINK_RATE nanosecs. On |
1525 |
> |
* timeout, if ctl has not changed, terminate the worker, which |
1526 |
> |
* will in turn wake up another worker to possibly repeat this |
1527 |
> |
* process. |
1528 |
> |
* |
1529 |
> |
* @param w the calling worker |
1530 |
> |
*/ |
1531 |
> |
private void idleAwaitWork(WorkQueue w) { |
1532 |
> |
long c; int nw, ec; |
1533 |
> |
if (!tryTerminate(false) && |
1534 |
> |
(int)((c = ctl) >> AC_SHIFT) + parallelism == 0 && |
1535 |
> |
(ec = w.eventCount) == ((int)c | INT_SIGN) && |
1536 |
> |
(nw = w.nextWait) != 0) { |
1537 |
> |
long nc = ((long)(nw & E_MASK) | // ctl to restore on timeout |
1538 |
> |
((c + AC_UNIT) & AC_MASK) | (c & TC_MASK)); |
1539 |
> |
ForkJoinTask.helpExpungeStaleExceptions(); // help clean |
1540 |
> |
ForkJoinWorkerThread wt = w.owner; |
1541 |
> |
while (ctl == c) { |
1542 |
> |
long startTime = System.nanoTime(); |
1543 |
> |
Thread.interrupted(); // timed variant of version in scan() |
1544 |
> |
U.putObject(wt, PARKBLOCKER, this); |
1545 |
> |
w.parker = wt; |
1546 |
> |
if (ctl == c) |
1547 |
> |
U.park(false, SHRINK_RATE); |
1548 |
> |
w.parker = null; |
1549 |
> |
U.putObject(wt, PARKBLOCKER, null); |
1550 |
> |
if (ctl != c) |
1551 |
> |
break; |
1552 |
> |
if (System.nanoTime() - startTime >= SHRINK_TIMEOUT && |
1553 |
> |
U.compareAndSwapLong(this, CTL, c, nc)) { |
1554 |
> |
w.runState = -1; // shrink |
1555 |
> |
w.eventCount = (ec + E_SEQ) | E_MASK; |
1556 |
> |
break; |
1557 |
> |
} |
1558 |
|
} |
1559 |
|
} |
1560 |
|
} |
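The body is the standard recheck-around-park idiom, written against Unsafe only so the thread's park blocker can also be set. In plain LockSupport terms (TimedIdleDemo is hypothetical):

    import java.util.concurrent.locks.LockSupport;

    final class TimedIdleDemo {
        volatile long state = 1L;   // stands in for ctl

        boolean awaitChange(long expected, long timeoutNanos) {
            long start = System.nanoTime();
            while (state == expected) {
                LockSupport.parkNanos(this, timeoutNanos);
                if (state != expected)
                    break;                                   // signaled: keep worker
                if (System.nanoTime() - start >= timeoutNanos)
                    return false;                            // timed out: shrink pool
            }
            return true;
        }
    }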
1561 |
|
|
1562 |
|
/** |
1563 |
< |
* Same idea as awaitJoin, but no helping, retries, or timeouts. |
1564 |
< |
*/ |
1565 |
< |
final void awaitBlocker(ManagedBlocker blocker) |
1566 |
< |
throws InterruptedException { |
1567 |
< |
while (!blocker.isReleasable()) { |
1568 |
< |
int wc = workerCounts; |
1569 |
< |
if ((wc & RUNNING_COUNT_MASK) != 0 && |
1570 |
< |
UNSAFE.compareAndSwapInt(this, workerCountsOffset, |
1571 |
< |
wc, wc - ONE_RUNNING)) { |
1572 |
< |
try { |
1573 |
< |
while (!blocker.isReleasable()) { |
1574 |
< |
long h = eventWaiters; |
1575 |
< |
if (h != 0L && |
1576 |
< |
(int)(h >>> EVENT_COUNT_SHIFT) != eventCount) |
1577 |
< |
releaseEventWaiters(); |
1578 |
< |
else if ((workerCounts & RUNNING_COUNT_MASK) == 0 && |
1579 |
< |
runState < TERMINATING) |
1580 |
< |
helpMaintainParallelism(); |
1581 |
< |
else if (blocker.block()) |
1563 |
> |
* Tries to locate and execute tasks for a stealer of the given |
1564 |
> |
* task, or in turn one of its stealers. Traces currentSteal -> |
1565 |
> |
* currentJoin links looking for a thread working on a descendant |
1566 |
> |
* of the given task and with a non-empty queue to steal back and |
1567 |
> |
* execute tasks from. The first call to this method upon a |
1568 |
> |
* waiting join will often entail scanning/search (which is OK |
1569 |
> |
* because the joiner has nothing better to do), but this method |
1570 |
> |
* leaves hints in workers to speed up subsequent calls. The |
1571 |
> |
* implementation is very branchy to cope with potential |
1572 |
> |
* inconsistencies or loops encountering chains that are stale, |
1573 |
> |
* unknown, or of length greater than MAX_HELP_DEPTH links. All |
1574 |
> |
* of these cases are dealt with by just having the caller retry. |
1575 |
> |
* |
1576 |
> |
* @param joiner the joining worker |
1577 |
> |
* @param task the task to join |
1578 |
> |
* @return true if found or ran a task (and so is immediately retryable) |
1579 |
> |
*/ |
1580 |
> |
final boolean tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) { |
1581 |
> |
ForkJoinTask<?> subtask; // current target |
1582 |
> |
boolean progress = false; |
1583 |
> |
int depth = 0; // current chain depth |
1584 |
> |
int m = runState & SMASK; |
1585 |
> |
WorkQueue[] ws = workQueues; |
1586 |
> |
|
1587 |
> |
if (ws != null && ws.length > m && (subtask = task).status >= 0) { |
1588 |
> |
outer:for (WorkQueue j = joiner;;) { |
1589 |
> |
// Try to find the stealer of subtask, by first using hint |
1590 |
> |
WorkQueue stealer = null; |
1591 |
> |
WorkQueue v = ws[j.stealHint & m]; |
1592 |
> |
if (v != null && v.currentSteal == subtask) |
1593 |
> |
stealer = v; |
1594 |
> |
else { |
1595 |
> |
for (int i = 1; i <= m; i += 2) { |
1596 |
> |
if ((v = ws[i]) != null && v.currentSteal == subtask) { |
1597 |
> |
stealer = v; |
1598 |
> |
j.stealHint = i; // save hint |
1599 |
|
break; |
1600 |
+ |
} |
1601 |
+ |
} |
1602 |
+ |
if (stealer == null) |
1603 |
+ |
break; |
1604 |
+ |
} |
1605 |
+ |
|
1606 |
+ |
for (WorkQueue q = stealer;;) { // Try to help stealer |
1607 |
+ |
ForkJoinTask<?> t; int b; |
1608 |
+ |
if (task.status < 0) |
1609 |
+ |
break outer; |
1610 |
+ |
if ((b = q.base) - q.top < 0) { |
1611 |
+ |
progress = true; |
1612 |
+ |
if (subtask.status < 0) |
1613 |
+ |
break outer; // stale |
1614 |
+ |
if ((t = q.pollAt(b)) != null) { |
1615 |
+ |
stealer.stealHint = joiner.poolIndex; |
1616 |
+ |
joiner.runSubtask(t); |
1617 |
+ |
} |
1618 |
+ |
} |
1619 |
+ |
else { // empty - try to descend to find stealer's stealer |
1620 |
+ |
ForkJoinTask<?> next = stealer.currentJoin; |
1621 |
+ |
if (++depth == MAX_HELP_DEPTH || subtask.status < 0 || |
1622 |
+ |
next == null || next == subtask) |
1623 |
+ |
break outer; // max depth, stale, dead-end, cyclic |
1624 |
+ |
subtask = next; |
1625 |
+ |
j = stealer; |
1626 |
+ |
break; |
1627 |
|
} |
1056 |
– |
} finally { |
1057 |
– |
int c; |
1058 |
– |
do {} while (!UNSAFE.compareAndSwapInt |
1059 |
– |
(this, workerCountsOffset, |
1060 |
– |
c = workerCounts, c + ONE_RUNNING)); |
1628 |
|
} |
1062 |
– |
break; |
1629 |
|
} |
1630 |
|
} |
1631 |
+ |
return progress; |
1632 |
|
} |
1633 |
|
|
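The chain walk in tryHelpStealer can be pictured without the pool internals:
starting from the joined task, repeatedly find the worker whose currentSteal
is that task; if its queue is non-empty, help drain it, otherwise descend to
the task that worker in turn joins, bounding the walk so stale or cyclic
chains simply fall back to the caller's retry. A toy model with hypothetical
Worker/Task types (not the pool's real WorkQueue):

    import java.util.ArrayDeque;

    class HelpChainDemo {
        static final int MAX_HELP_DEPTH = 16;          // bound on chain length

        static class Task { volatile boolean done; }
        static class Worker {
            volatile Task currentSteal;                // task taken from another queue
            volatile Task currentJoin;                 // task this worker waits on
            final ArrayDeque<Task> queue = new ArrayDeque<Task>();
        }

        // Follows currentSteal -> currentJoin links from the joined task and
        // returns a queue worth helping, or null so the caller just retries
        // (stale, unknown, cyclic, or over-long chains all land here).
        static ArrayDeque<Task> findQueueToHelp(Worker[] ws, Task joined) {
            Task subtask = joined;
            for (int depth = 0; depth < MAX_HELP_DEPTH; ++depth) {
                Worker stealer = null;
                for (Worker w : ws) {                  // who stole subtask?
                    if (w != null && w.currentSteal == subtask) {
                        stealer = w;
                        break;
                    }
                }
                if (stealer == null || subtask.done)
                    return null;                       // stale or unknown
                if (!stealer.queue.isEmpty())
                    return stealer.queue;              // help drain this queue
                Task next = stealer.currentJoin;       // descend one link
                if (next == null || next == subtask)
                    return null;                       // dead end or trivial cycle
                subtask = next;
            }
            return null;                               // chain too long
        }
    }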
1634 |
|
/** |
1635 |
< |
* Possibly initiates and/or completes termination. |
1635 |
> |
* If task is at base of some steal queue, steals and executes it. |
1636 |
|
* |
1637 |
< |
* @param now if true, unconditionally terminate, else only |
1638 |
< |
* if shutdown and empty queue and no active workers |
1072 |
< |
* @return true if now terminating or terminated |
1637 |
> |
* @param joiner the joining worker |
1638 |
> |
* @param task the task |
1639 |
|
*/ |
1640 |
< |
private boolean tryTerminate(boolean now) { |
1641 |
< |
if (now) |
1642 |
< |
advanceRunLevel(SHUTDOWN); // ensure at least SHUTDOWN |
1643 |
< |
else if (runState < SHUTDOWN || |
1644 |
< |
!submissionQueue.isEmpty() || |
1645 |
< |
(runState & ACTIVE_COUNT_MASK) != 0) |
1646 |
< |
return false; |
1647 |
< |
|
1648 |
< |
if (advanceRunLevel(TERMINATING)) |
1649 |
< |
startTerminating(); |
1650 |
< |
|
1085 |
< |
// Finish now if all threads terminated; else in some subsequent call |
1086 |
< |
if ((workerCounts >>> TOTAL_COUNT_SHIFT) == 0) { |
1087 |
< |
advanceRunLevel(TERMINATED); |
1088 |
< |
termination.arrive(); |
1640 |
> |
final void tryPollForAndExec(WorkQueue joiner, ForkJoinTask<?> task) { |
1641 |
> |
WorkQueue[] ws; |
1642 |
> |
int m = runState & SMASK; |
1643 |
> |
if ((ws = workQueues) != null && ws.length > m) { |
1644 |
> |
for (int j = 1; j <= m && task.status >= 0; j += 2) { |
1645 |
> |
WorkQueue q = ws[j]; |
1646 |
> |
if (q != null && q.pollFor(task)) { |
1647 |
> |
joiner.runSubtask(task); |
1648 |
> |
break; |
1649 |
> |
} |
1650 |
> |
} |
1651 |
|
} |
1090 |
– |
return true; |
1652 |
|
} |
1653 |
|
|
1654 |
|
/** |
1655 |
< |
* Actions on transition to TERMINATING |
1656 |
< |
* |
1657 |
< |
* Runs up to four passes through workers: (0) shutting down each |
1658 |
< |
* (without waking up if parked) to quickly spread notifications |
1659 |
< |
* without unnecessary bouncing around event queues, etc.; (1) wake |
1660 |
< |
* up and help cancel tasks; (2) interrupt; (3) mop up races with |
1661 |
< |
* interrupted workers |
1662 |
< |
*/ |
1663 |
< |
private void startTerminating() { |
1664 |
< |
cancelSubmissions(); |
1665 |
< |
for (int passes = 0; passes < 4 && workerCounts != 0; ++passes) { |
1666 |
< |
int c; // advance event count |
1667 |
< |
UNSAFE.compareAndSwapInt(this, eventCountOffset, |
1668 |
< |
c = eventCount, c+1); |
1669 |
< |
eventWaiters = 0L; // clobber lists |
1670 |
< |
spareWaiters = 0; |
1671 |
< |
ForkJoinWorkerThread[] ws = workers; |
1111 |
< |
int n = ws.length; |
1112 |
< |
for (int i = 0; i < n; ++i) { |
1113 |
< |
ForkJoinWorkerThread w = ws[i]; |
1114 |
< |
if (w != null) { |
1115 |
< |
w.shutdown(); |
1116 |
< |
if (passes > 0 && !w.isTerminated()) { |
1117 |
< |
w.cancelTasks(); |
1118 |
< |
LockSupport.unpark(w); |
1119 |
< |
if (passes > 1) { |
1120 |
< |
try { |
1121 |
< |
w.interrupt(); |
1122 |
< |
} catch (SecurityException ignore) { |
1123 |
< |
} |
1124 |
< |
} |
1655 |
> |
* Returns a non-empty steal queue, if one is found during a random, |
1656 |
> |
* then cyclic scan, else null. This method must be retried by |
1657 |
> |
* caller if, by the time it tries to use the queue, it is empty. |
1658 |
> |
*/ |
1659 |
> |
private WorkQueue findNonEmptyStealQueue(WorkQueue w) { |
1660 |
> |
int r = w.seed; // Same idea as scan(), but ignoring submissions |
1661 |
> |
for (WorkQueue[] ws;;) { |
1662 |
> |
int m = runState & SMASK; |
1663 |
> |
if ((ws = workQueues) == null) |
1664 |
> |
return null; |
1665 |
> |
if (ws.length > m) { |
1666 |
> |
WorkQueue q; |
1667 |
> |
for (int n = m << 2, k = r, j = -n;;) { |
1668 |
> |
r ^= r << 13; r ^= r >>> 17; r ^= r << 5; |
1669 |
> |
if ((q = ws[(k | 1) & m]) != null && q.base - q.top < 0) { |
1670 |
> |
w.seed = r; |
1671 |
> |
return q; |
1672 |
|
} |
1673 |
+ |
else if (j > n) |
1674 |
+ |
return null; |
1675 |
+ |
else |
1676 |
+ |
k = (j++ < 0) ? r : k + ((m >>> 1) | 1); |
1677 |
+ |
|
1678 |
|
} |
1679 |
|
} |
1680 |
|
} |
1681 |
|
} |
1682 |
|
|
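The r ^= r << 13; r ^= r >>> 17; r ^= r << 5 line in the scan above is
Marsaglia's 32-bit xorshift generator: three shifts and xors give a
full-period sequence over nonzero seeds, which is plenty for randomizing
victim selection. A standalone illustration (seed and table size chosen
arbitrarily):

    public class XorShiftDemo {
        public static void main(String[] args) {
            int r = 0x9E3779B9;                        // any nonzero seed works
            int slots = 16;                            // power-of-two table length
            for (int i = 0; i < 8; ++i) {
                r ^= r << 13; r ^= r >>> 17; r ^= r << 5;  // Marsaglia xorshift
                // (r | 1) & (slots - 1) yields an odd index, mirroring how the
                // scan above probes only the odd (worker-owned) slots
                System.out.println("probe slot: " + ((r | 1) & (slots - 1)));
            }
        }
    }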
1683 |
|
/** |
1684 |
< |
* Clear out and cancel submissions, ignoring exceptions |
1685 |
< |
*/ |
1686 |
< |
private void cancelSubmissions() { |
1687 |
< |
ForkJoinTask<?> task; |
1688 |
< |
while ((task = submissionQueue.poll()) != null) { |
1689 |
< |
try { |
1690 |
< |
task.cancel(false); |
1691 |
< |
} catch (Throwable ignore) { |
1684 |
> |
* Runs tasks until {@code isQuiescent()}. We piggyback on |
1685 |
> |
* active count ctl maintenance, but rather than blocking |
1686 |
> |
* when tasks cannot be found, we rescan until all others cannot |
1687 |
> |
* find tasks either. |
1688 |
> |
*/ |
1689 |
> |
final void helpQuiescePool(WorkQueue w) { |
1690 |
> |
for (boolean active = true;;) { |
1691 |
> |
w.runLocalTasks(); // exhaust local queue |
1692 |
> |
WorkQueue q = findNonEmptyStealQueue(w); |
1693 |
> |
if (q != null) { |
1694 |
> |
ForkJoinTask<?> t; |
1695 |
> |
if (!active) { // re-establish active count |
1696 |
> |
long c; |
1697 |
> |
active = true; |
1698 |
> |
do {} while (!U.compareAndSwapLong |
1699 |
> |
(this, CTL, c = ctl, c + AC_UNIT)); |
1700 |
> |
} |
1701 |
> |
if ((t = q.poll()) != null) |
1702 |
> |
w.runSubtask(t); |
1703 |
> |
} |
1704 |
> |
else { |
1705 |
> |
long c; |
1706 |
> |
if (active) { // decrement active count without queuing |
1707 |
> |
active = false; |
1708 |
> |
do {} while (!U.compareAndSwapLong |
1709 |
> |
(this, CTL, c = ctl, c -= AC_UNIT)); |
1710 |
> |
} |
1711 |
> |
else |
1712 |
> |
c = ctl; // re-increment on exit |
1713 |
> |
if ((int)(c >> AC_SHIFT) + parallelism == 0) { |
1714 |
> |
do {} while (!U.compareAndSwapLong |
1715 |
> |
(this, CTL, c = ctl, c + AC_UNIT)); |
1716 |
> |
break; |
1717 |
> |
} |
1718 |
|
} |
1719 |
|
} |
1720 |
|
} |
1721 |
|
|
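The accounting in helpQuiescePool is the interesting part: a worker that finds
no task does not block, it decrements the shared active count and keeps
rescanning, and quiescence is precisely the moment that count hits zero, at
which point the worker re-increments and exits. A stripped-down model of just
that accounting, under assumed names, with a Supplier standing in for the
local-drain-plus-steal scan:

    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.function.Supplier;

    class QuiesceDemo {
        final AtomicInteger active;                 // workers currently counted active
        QuiesceDemo(int parallelism) { active = new AtomicInteger(parallelism); }

        void helpQuiesce(Supplier<Runnable> findTask) {
            boolean counted = true;                 // caller starts counted as active
            for (;;) {
                Runnable task = findTask.get();
                if (task != null) {
                    if (!counted) {                 // re-establish active count
                        active.incrementAndGet();
                        counted = true;
                    }
                    task.run();
                } else {
                    if (counted) {                  // go inactive, but keep scanning
                        active.decrementAndGet();
                        counted = false;
                    }
                    if (active.get() == 0) {        // nobody can find work: quiescent
                        active.incrementAndGet();   // re-increment on exit
                        return;
                    }
                }
            }
        }
    }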
1722 |
< |
// misc support for ForkJoinWorkerThread |
1722 |
> |
/** |
1723 |
> |
* Gets and removes a local or stolen task for the given worker. |
1724 |
> |
* |
1725 |
> |
* @return a task, if available |
1726 |
> |
*/ |
1727 |
> |
final ForkJoinTask<?> nextTaskFor(WorkQueue w) { |
1728 |
> |
for (ForkJoinTask<?> t;;) { |
1729 |
> |
WorkQueue q; |
1730 |
> |
if ((t = w.nextLocalTask()) != null) |
1731 |
> |
return t; |
1732 |
> |
if ((q = findNonEmptyStealQueue(w)) == null) |
1733 |
> |
return null; |
1734 |
> |
if ((t = q.poll()) != null) |
1735 |
> |
return t; |
1736 |
> |
} |
1737 |
> |
} |
1738 |
|
|
1739 |
|
/** |
1740 |
< |
* Returns pool number |
1740 |
> |
* Returns the approximate (non-atomic) number of idle threads per |
1741 |
> |
* active thread to offset steal queue size for method |
1742 |
> |
* ForkJoinTask.getSurplusQueuedTaskCount(). |
1743 |
|
*/ |
1744 |
< |
final int getPoolNumber() { |
1745 |
< |
return poolNumber; |
1744 |
> |
final int idlePerActive() { |
1745 |
> |
// Approximate at powers of two for small values, saturate past 4 |
1746 |
> |
int p = parallelism; |
1747 |
> |
int a = p + (int)(ctl >> AC_SHIFT); |
1748 |
> |
return (a > (p >>>= 1) ? 0 : |
1749 |
> |
a > (p >>>= 1) ? 1 : |
1750 |
> |
a > (p >>>= 1) ? 2 : |
1751 |
> |
a > (p >>>= 1) ? 4 : |
1752 |
> |
8); |
1753 |
> |
} |
1754 |
> |
|
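The chained conditionals in idlePerActive quantize the idle-per-active ratio
to 0, 1, 2, 4, or 8 by successively halving parallelism. With p = 8, for
example: 5 or more active threads yield 0, 3-4 yield 1, exactly 2 yields 2,
exactly 1 yields 4, and 0 saturates at 8. The quantization can be checked
standalone:

    public class IdlePerActiveDemo {
        // Same quantization as idlePerActive above, with the counts passed in.
        static int idlePerActive(int parallelism, int active) {
            int p = parallelism, a = active;
            return (a > (p >>>= 1) ? 0 :
                    a > (p >>>= 1) ? 1 :
                    a > (p >>>= 1) ? 2 :
                    a > (p >>>= 1) ? 4 :
                    8);
        }

        public static void main(String[] args) {
            for (int a = 8; a >= 0; --a)   // parallelism 8, varying active count
                System.out.println("active=" + a + " -> " + idlePerActive(8, a));
        }
    }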
1755 |
> |
// Termination |
1756 |
> |
|
1757 |
> |
/** |
1758 |
> |
* Sets SHUTDOWN bit of runState under lock |
1759 |
> |
*/ |
1760 |
> |
private void enableShutdown() { |
1761 |
> |
ReentrantLock lock = this.lock; |
1762 |
> |
if (runState >= 0) { |
1763 |
> |
lock.lock(); // don't need try/finally |
1764 |
> |
runState |= SHUTDOWN; |
1765 |
> |
lock.unlock(); |
1766 |
> |
} |
1767 |
|
} |
1768 |
|
|
1769 |
|
/** |
1770 |
< |
* Tries to accumulate steal count from a worker, clearing |
1771 |
< |
* the worker's value. |
1770 |
> |
* Possibly initiates and/or completes termination. Upon |
1771 |
> |
* termination, cancels all queued tasks and then |
1772 |
|
* |
1773 |
< |
* @return true if worker steal count now zero |
1773 |
> |
* @param now if true, unconditionally terminate, else only |
1774 |
> |
* if no work and no active workers |
1775 |
> |
* @return true if now terminating or terminated |
1776 |
|
*/ |
1777 |
< |
final boolean tryAccumulateStealCount(ForkJoinWorkerThread w) { |
1778 |
< |
int sc = w.stealCount; |
1779 |
< |
long c = stealCount; |
1780 |
< |
// CAS even if zero, for fence effects |
1781 |
< |
if (UNSAFE.compareAndSwapLong(this, stealCountOffset, c, c + sc)) { |
1782 |
< |
if (sc != 0) |
1783 |
< |
w.stealCount = 0; |
1784 |
< |
return true; |
1777 |
> |
private boolean tryTerminate(boolean now) { |
1778 |
> |
for (long c;;) { |
1779 |
> |
if (((c = ctl) & STOP_BIT) != 0) { // already terminating |
1780 |
> |
if ((short)(c >>> TC_SHIFT) == -parallelism) { |
1781 |
> |
ReentrantLock lock = this.lock; // signal when no workers |
1782 |
> |
lock.lock(); // don't need try/finally |
1783 |
> |
termination.signalAll(); // signal when 0 workers |
1784 |
> |
lock.unlock(); |
1785 |
> |
} |
1786 |
> |
return true; |
1787 |
> |
} |
1788 |
> |
if (!now) { |
1789 |
> |
if ((int)(c >> AC_SHIFT) != -parallelism || runState >= 0 || |
1790 |
> |
hasQueuedSubmissions()) |
1791 |
> |
return false; |
1792 |
> |
// Check for unqueued inactive workers. One pass suffices. |
1793 |
> |
WorkQueue[] ws = workQueues; WorkQueue w; |
1794 |
> |
if (ws != null) { |
1795 |
> |
int n = ws.length; |
1796 |
> |
for (int i = 1; i < n; i += 2) { |
1797 |
> |
if ((w = ws[i]) != null && w.eventCount >= 0) |
1798 |
> |
return false; |
1799 |
> |
} |
1800 |
> |
} |
1801 |
> |
} |
1802 |
> |
if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) |
1803 |
> |
startTerminating(); |
1804 |
|
} |
1168 |
– |
return sc == 0; |
1805 |
|
} |
1806 |
|
|
1807 |
|
/** |
1808 |
< |
* Returns the approximate (non-atomic) number of idle threads per |
1809 |
< |
* active thread. |
1808 |
> |
* Initiates termination: Runs three passes through workQueues: |
1809 |
> |
* (0) Setting termination status, followed by wakeups of queued |
1810 |
> |
* workers; (1) cancelling all tasks; (2) interrupting lagging |
1811 |
> |
* threads (likely in external tasks, but possibly also blocked in |
1812 |
> |
* joins). Each pass repeats previous steps because of potential |
1813 |
> |
* lagging thread creation. |
1814 |
|
*/ |
1815 |
< |
final int idlePerActive() { |
1816 |
< |
int pc = parallelism; // use parallelism, not rc |
1817 |
< |
int ac = runState; // no mask -- artificially boosts during shutdown |
1818 |
< |
// Use exact results for small values, saturate past 4 |
1819 |
< |
return pc <= ac? 0 : pc >>> 1 <= ac? 1 : pc >>> 2 <= ac? 3 : pc >>> 3; |
1815 |
> |
private void startTerminating() { |
1816 |
> |
for (int pass = 0; pass < 3; ++pass) { |
1817 |
> |
WorkQueue[] ws = workQueues; |
1818 |
> |
if (ws != null) { |
1819 |
> |
WorkQueue w; Thread wt; |
1820 |
> |
int n = ws.length; |
1821 |
> |
for (int j = 0; j < n; ++j) { |
1822 |
> |
if ((w = ws[j]) != null) { |
1823 |
> |
w.runState = -1; |
1824 |
> |
if (pass > 0) { |
1825 |
> |
w.cancelAll(); |
1826 |
> |
if (pass > 1 && (wt = w.owner) != null && |
1827 |
> |
!wt.isInterrupted()) { |
1828 |
> |
try { |
1829 |
> |
wt.interrupt(); |
1830 |
> |
} catch (SecurityException ignore) { |
1831 |
> |
} |
1832 |
> |
} |
1833 |
> |
} |
1834 |
> |
} |
1835 |
> |
} |
1836 |
> |
// Wake up workers parked on event queue |
1837 |
> |
int i, e; long c; Thread p; |
1838 |
> |
while ((i = ((~(e = (int)(c = ctl)) << 1) | 1) & SMASK) < n && |
1839 |
> |
(w = ws[i]) != null && |
1840 |
> |
w.eventCount == (e | INT_SIGN)) { |
1841 |
> |
long nc = ((long)(w.nextWait & E_MASK) | |
1842 |
> |
((c + AC_UNIT) & AC_MASK) | |
1843 |
> |
(c & (TC_MASK|STOP_BIT))); |
1844 |
> |
if (U.compareAndSwapLong(this, CTL, c, nc)) { |
1845 |
> |
w.eventCount = (e + E_SEQ) & E_MASK; |
1846 |
> |
if ((p = w.parker) != null) |
1847 |
> |
U.unpark(p); |
1848 |
> |
} |
1849 |
> |
} |
1850 |
> |
} |
1851 |
> |
} |
1852 |
|
} |
1853 |
|
|
1854 |
< |
// Public and protected methods |
1854 |
> |
// Exported methods |
1855 |
|
|
1856 |
|
// Constructors |
1857 |
|
|
1898 |
|
* use {@link #defaultForkJoinWorkerThreadFactory}. |
1899 |
|
* @param handler the handler for internal worker threads that |
1900 |
|
* terminate due to unrecoverable errors encountered while executing |
1901 |
< |
* tasks. For default value, use <code>null</code>. |
1901 |
> |
* tasks. For default value, use {@code null}. |
1902 |
|
* @param asyncMode if true, |
1903 |
|
* establishes local first-in-first-out scheduling mode for forked |
1904 |
|
* tasks that are never joined. This mode may be more appropriate |
1905 |
|
* than default locally stack-based mode in applications in which |
1906 |
|
* worker threads only process event-style asynchronous tasks. |
1907 |
< |
* For default value, use <code>false</code>. |
1907 |
> |
* For default value, use {@code false}. |
1908 |
|
* @throws IllegalArgumentException if parallelism less than or |
1909 |
|
* equal to zero, or greater than implementation limit |
1910 |
|
* @throws NullPointerException if the factory is null |
1920 |
|
checkPermission(); |
1921 |
|
if (factory == null) |
1922 |
|
throw new NullPointerException(); |
1923 |
< |
if (parallelism <= 0 || parallelism > MAX_WORKERS) |
1923 |
> |
if (parallelism <= 0 || parallelism > MAX_ID) |
1924 |
|
throw new IllegalArgumentException(); |
1925 |
|
this.parallelism = parallelism; |
1926 |
|
this.factory = factory; |
1927 |
|
this.ueh = handler; |
1928 |
< |
this.locallyFifo = asyncMode; |
1929 |
< |
int arraySize = initialArraySizeFor(parallelism); |
1930 |
< |
this.workers = new ForkJoinWorkerThread[arraySize]; |
1931 |
< |
this.submissionQueue = new LinkedTransferQueue<ForkJoinTask<?>>(); |
1932 |
< |
this.workerLock = new ReentrantLock(); |
1933 |
< |
this.termination = new Phaser(1); |
1934 |
< |
this.poolNumber = poolNumberGenerator.incrementAndGet(); |
1935 |
< |
} |
1936 |
< |
|
1937 |
< |
/** |
1938 |
< |
* Returns initial power of two size for workers array. |
1939 |
< |
* @param pc the initial parallelism level |
1940 |
< |
*/ |
1941 |
< |
private static int initialArraySizeFor(int pc) { |
1942 |
< |
// If possible, initially allocate enough space for one spare |
1943 |
< |
int size = pc < MAX_WORKERS ? pc + 1 : MAX_WORKERS; |
1944 |
< |
// See Hackers Delight, sec 3.2. We know MAX_WORKERS < (1 << 16) |
1945 |
< |
size |= size >>> 1; |
1946 |
< |
size |= size >>> 2; |
1947 |
< |
size |= size >>> 4; |
1948 |
< |
size |= size >>> 8; |
1949 |
< |
return size + 1; |
1928 |
> |
this.localMode = asyncMode ? FIFO_QUEUE : LIFO_QUEUE; |
1929 |
> |
this.nextPoolIndex = 1; |
1930 |
> |
long np = (long)(-parallelism); // offset ctl counts |
1931 |
> |
this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK); |
1932 |
> |
// initialize workQueues array with room for 2*parallelism if possible |
1933 |
> |
int n = parallelism << 1; |
1934 |
> |
if (n >= MAX_ID) |
1935 |
> |
n = MAX_ID; |
1936 |
> |
else { // See Hackers Delight, sec 3.2, where n < (1 << 16) |
1937 |
> |
n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; |
1938 |
> |
} |
1939 |
> |
this.workQueues = new WorkQueue[(n + 1) << 1]; |
1940 |
> |
ReentrantLock lck = this.lock = new ReentrantLock(); |
1941 |
> |
this.termination = lck.newCondition(); |
1942 |
> |
this.stealCount = new AtomicLong(); |
1943 |
> |
this.nextWorkerNumber = new AtomicInteger(); |
1944 |
> |
StringBuilder sb = new StringBuilder("ForkJoinPool-"); |
1945 |
> |
sb.append(poolNumberGenerator.incrementAndGet()); |
1946 |
> |
sb.append("-worker-"); |
1947 |
> |
this.workerNamePrefix = sb.toString(); |
1948 |
> |
// Create initial submission queue |
1949 |
> |
WorkQueue sq = tryAddSharedQueue(0); |
1950 |
> |
if (sq != null) |
1951 |
> |
sq.growArray(false); |
1952 |
|
} |
1953 |
|
|
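Two bit tricks in this constructor deserve a closer look: the shift-or cascade
smears the high bit of n so that n + 1 becomes the next power of two (Hacker's
Delight, sec. 3.2, valid because n < 1 << 16), and negated parallelism is
pre-packed into the active-count and total-count fields of ctl so that a zero
field later means "exactly at target". A sketch under assumed field offsets
(the real ctl layout also carries event and wait-queue bits):

    public class CtorBitsDemo {
        public static void main(String[] args) {
            int parallelism = 6;

            // Round (parallelism << 1) up toward a power of two by bit
            // smearing; works for any n < (1 << 16).
            int n = parallelism << 1;
            n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8;
            System.out.println("array size: " + ((n + 1) << 1)); // 32 here

            // Pack -parallelism into two 16-bit counter fields (illustrative
            // shifts; not the pool's exact layout).
            final int AC_SHIFT = 48, TC_SHIFT = 32;
            final long AC_MASK = 0xffffL << AC_SHIFT, TC_MASK = 0xffffL << TC_SHIFT;
            long np = (long)(-parallelism);
            long ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
            // Adding parallelism back recovers a zero "deficit" in each field.
            System.out.println("active deficit: " +
                               ((int)(ctl >> AC_SHIFT) + parallelism));
            System.out.println("total  deficit: " +
                               ((short)(ctl >>> TC_SHIFT) + parallelism));
        }
    }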
1954 |
|
// Execution methods |
1955 |
|
|
1956 |
|
/** |
1283 |
– |
* Common code for execute, invoke and submit |
1284 |
– |
*/ |
1285 |
– |
private <T> void doSubmit(ForkJoinTask<T> task) { |
1286 |
– |
if (task == null) |
1287 |
– |
throw new NullPointerException(); |
1288 |
– |
if (runState >= SHUTDOWN) |
1289 |
– |
throw new RejectedExecutionException(); |
1290 |
– |
submissionQueue.offer(task); |
1291 |
– |
int c; // try to increment event count -- CAS failure OK |
1292 |
– |
UNSAFE.compareAndSwapInt(this, eventCountOffset, c = eventCount, c+1); |
1293 |
– |
helpMaintainParallelism(); // create, start, or resume some workers |
1294 |
– |
} |
1295 |
– |
|
1296 |
– |
/** |
1957 |
|
* Performs the given task, returning its result upon completion. |
1958 |
+ |
* If the computation encounters an unchecked Exception or Error, |
1959 |
+ |
* it is rethrown as the outcome of this invocation. Rethrown |
1960 |
+ |
* exceptions behave in the same way as regular exceptions, but, |
1961 |
+ |
* when possible, contain stack traces (as displayed for example |
1962 |
+ |
* using {@code ex.printStackTrace()}) of both the current thread |
1963 |
+ |
* as well as the thread actually encountering the exception; |
1964 |
+ |
* minimally only the latter. |
1965 |
|
* |
1966 |
|
* @param task the task |
1967 |
|
* @return the task's result |
1994 |
|
* scheduled for execution |
1995 |
|
*/ |
1996 |
|
public void execute(Runnable task) { |
1997 |
+ |
if (task == null) |
1998 |
+ |
throw new NullPointerException(); |
1999 |
|
ForkJoinTask<?> job; |
2000 |
|
if (task instanceof ForkJoinTask<?>) // avoid re-wrap |
2001 |
|
job = (ForkJoinTask<?>) task; |
2024 |
|
* scheduled for execution |
2025 |
|
*/ |
2026 |
|
public <T> ForkJoinTask<T> submit(Callable<T> task) { |
2027 |
+ |
if (task == null) |
2028 |
+ |
throw new NullPointerException(); |
2029 |
|
ForkJoinTask<T> job = ForkJoinTask.adapt(task); |
2030 |
|
doSubmit(job); |
2031 |
|
return job; |
2037 |
|
* scheduled for execution |
2038 |
|
*/ |
2039 |
|
public <T> ForkJoinTask<T> submit(Runnable task, T result) { |
2040 |
+ |
if (task == null) |
2041 |
+ |
throw new NullPointerException(); |
2042 |
|
ForkJoinTask<T> job = ForkJoinTask.adapt(task, result); |
2043 |
|
doSubmit(job); |
2044 |
|
return job; |
2050 |
|
* scheduled for execution |
2051 |
|
*/ |
2052 |
|
public ForkJoinTask<?> submit(Runnable task) { |
2053 |
+ |
if (task == null) |
2054 |
+ |
throw new NullPointerException(); |
2055 |
|
ForkJoinTask<?> job; |
2056 |
|
if (task instanceof ForkJoinTask<?>) // avoid re-wrap |
2057 |
|
job = (ForkJoinTask<?>) task; |
2117 |
|
|
2118 |
|
/** |
2119 |
|
* Returns the number of worker threads that have started but not |
2120 |
< |
* yet terminated. This result returned by this method may differ |
2120 |
> |
* yet terminated. The result returned by this method may differ |
2121 |
|
* from {@link #getParallelism} when threads are created to |
2122 |
|
* maintain parallelism when others are cooperatively blocked. |
2123 |
|
* |
2124 |
|
* @return the number of worker threads |
2125 |
|
*/ |
2126 |
|
public int getPoolSize() { |
2127 |
< |
return workerCounts >>> TOTAL_COUNT_SHIFT; |
2127 |
> |
return parallelism + (short)(ctl >>> TC_SHIFT); |
2128 |
|
} |
2129 |
|
|
2130 |
|
/** |
2134 |
|
* @return {@code true} if this pool uses async mode |
2135 |
|
*/ |
2136 |
|
public boolean getAsyncMode() { |
2137 |
< |
return locallyFifo; |
2137 |
> |
return localMode != 0; |
2138 |
|
} |
2139 |
|
|
2140 |
|
/** |
2146 |
|
* @return the number of worker threads |
2147 |
|
*/ |
2148 |
|
public int getRunningThreadCount() { |
2149 |
< |
return workerCounts & RUNNING_COUNT_MASK; |
2149 |
> |
int rc = 0; |
2150 |
> |
WorkQueue[] ws; WorkQueue w; |
2151 |
> |
if ((ws = workQueues) != null) { |
2152 |
> |
int n = ws.length; |
2153 |
> |
for (int i = 1; i < n; i += 2) { |
2154 |
> |
Thread.State s; ForkJoinWorkerThread wt; |
2155 |
> |
if ((w = ws[i]) != null && (wt = w.owner) != null && |
2156 |
> |
w.eventCount >= 0 && |
2157 |
> |
(s = wt.getState()) != Thread.State.BLOCKED && |
2158 |
> |
s != Thread.State.WAITING && |
2159 |
> |
s != Thread.State.TIMED_WAITING) |
2160 |
> |
++rc; |
2161 |
> |
} |
2162 |
> |
} |
2163 |
> |
return rc; |
2164 |
|
} |
2165 |
|
|
2166 |
|
/** |
2171 |
|
* @return the number of active threads |
2172 |
|
*/ |
2173 |
|
public int getActiveThreadCount() { |
2174 |
< |
return runState & ACTIVE_COUNT_MASK; |
2174 |
> |
int r = parallelism + (int)(ctl >> AC_SHIFT); |
2175 |
> |
return (r <= 0) ? 0 : r; // suppress momentarily negative values |
2176 |
|
} |
2177 |
|
|
2178 |
|
/** |
2187 |
|
* @return {@code true} if all threads are currently idle |
2188 |
|
*/ |
2189 |
|
public boolean isQuiescent() { |
2190 |
< |
return (runState & ACTIVE_COUNT_MASK) == 0; |
2190 |
> |
return (int)(ctl >> AC_SHIFT) + parallelism == 0; |
2191 |
|
} |
2192 |
|
|
2193 |
|
/** |
2202 |
|
* @return the number of steals |
2203 |
|
*/ |
2204 |
|
public long getStealCount() { |
2205 |
< |
return stealCount; |
2205 |
> |
long count = stealCount.get(); |
2206 |
> |
WorkQueue[] ws; WorkQueue w; |
2207 |
> |
if ((ws = workQueues) != null) { |
2208 |
> |
int n = ws.length; |
2209 |
> |
for (int i = 1; i < n; i += 2) { |
2210 |
> |
if ((w = ws[i]) != null) |
2211 |
> |
count += w.totalSteals; |
2212 |
> |
} |
2213 |
> |
} |
2214 |
> |
return count; |
2215 |
|
} |
2216 |
|
|
2217 |
|
/** |
2226 |
|
*/ |
2227 |
|
public long getQueuedTaskCount() { |
2228 |
|
long count = 0; |
2229 |
< |
ForkJoinWorkerThread[] ws = workers; |
2230 |
< |
int n = ws.length; |
2231 |
< |
for (int i = 0; i < n; ++i) { |
2232 |
< |
ForkJoinWorkerThread w = ws[i]; |
2233 |
< |
if (w != null) |
2234 |
< |
count += w.getQueueSize(); |
2229 |
> |
WorkQueue[] ws; WorkQueue w; |
2230 |
> |
if ((ws = workQueues) != null) { |
2231 |
> |
int n = ws.length; |
2232 |
> |
for (int i = 1; i < n; i += 2) { |
2233 |
> |
if ((w = ws[i]) != null) |
2234 |
> |
count += w.queueSize(); |
2235 |
> |
} |
2236 |
|
} |
2237 |
|
return count; |
2238 |
|
} |
2239 |
|
|
2240 |
|
/** |
2241 |
|
* Returns an estimate of the number of tasks submitted to this |
2242 |
< |
* pool that have not yet begun executing. This method takes time |
2243 |
< |
* proportional to the number of submissions. |
2242 |
> |
* pool that have not yet begun executing. This method may take |
2243 |
> |
* time proportional to the number of submissions. |
2244 |
|
* |
2245 |
|
* @return the number of queued submissions |
2246 |
|
*/ |
2247 |
|
public int getQueuedSubmissionCount() { |
2248 |
< |
return submissionQueue.size(); |
2248 |
> |
int count = 0; |
2249 |
> |
WorkQueue[] ws; WorkQueue w; |
2250 |
> |
if ((ws = workQueues) != null) { |
2251 |
> |
int n = ws.length; |
2252 |
> |
for (int i = 0; i < n; i += 2) { |
2253 |
> |
if ((w = ws[i]) != null) |
2254 |
> |
count += w.queueSize(); |
2255 |
> |
} |
2256 |
> |
} |
2257 |
> |
return count; |
2258 |
|
} |
2259 |
|
|
2260 |
|
/** |
2264 |
|
* @return {@code true} if there are any queued submissions |
2265 |
|
*/ |
2266 |
|
public boolean hasQueuedSubmissions() { |
2267 |
< |
return !submissionQueue.isEmpty(); |
2267 |
> |
WorkQueue[] ws; WorkQueue w; |
2268 |
> |
if ((ws = workQueues) != null) { |
2269 |
> |
int n = ws.length; |
2270 |
> |
for (int i = 0; i < n; i += 2) { |
2271 |
> |
if ((w = ws[i]) != null && w.queueSize() != 0) |
2272 |
> |
return true; |
2273 |
> |
} |
2274 |
> |
} |
2275 |
> |
return false; |
2276 |
|
} |
2277 |
|
|
2278 |
|
/** |
2283 |
|
* @return the next submission, or {@code null} if none |
2284 |
|
*/ |
2285 |
|
protected ForkJoinTask<?> pollSubmission() { |
2286 |
< |
return submissionQueue.poll(); |
2286 |
> |
WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t; |
2287 |
> |
if ((ws = workQueues) != null) { |
2288 |
> |
int n = ws.length; |
2289 |
> |
for (int i = 0; i < n; i += 2) { |
2290 |
> |
if ((w = ws[i]) != null && (t = w.poll()) != null) |
2291 |
> |
return t; |
2292 |
> |
} |
2293 |
> |
} |
2294 |
> |
return null; |
2295 |
|
} |
2296 |
|
|
2297 |
|
/** |
2312 |
|
* @return the number of elements transferred |
2313 |
|
*/ |
2314 |
|
protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) { |
2315 |
< |
int count = submissionQueue.drainTo(c); |
2316 |
< |
ForkJoinWorkerThread[] ws = workers; |
2317 |
< |
int n = ws.length; |
2318 |
< |
for (int i = 0; i < n; ++i) { |
2319 |
< |
ForkJoinWorkerThread w = ws[i]; |
2320 |
< |
if (w != null) |
2321 |
< |
count += w.drainTasksTo(c); |
2315 |
> |
int count = 0; |
2316 |
> |
WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t; |
2317 |
> |
if ((ws = workQueues) != null) { |
2318 |
> |
int n = ws.length; |
2319 |
> |
for (int i = 0; i < n; ++i) { |
2320 |
> |
if ((w = ws[i]) != null) { |
2321 |
> |
while ((t = w.poll()) != null) { |
2322 |
> |
c.add(t); |
2323 |
> |
++count; |
2324 |
> |
} |
2325 |
> |
} |
2326 |
> |
} |
2327 |
|
} |
2328 |
|
return count; |
2329 |
|
} |
2339 |
|
long st = getStealCount(); |
2340 |
|
long qt = getQueuedTaskCount(); |
2341 |
|
long qs = getQueuedSubmissionCount(); |
2342 |
< |
int wc = workerCounts; |
1613 |
< |
int tc = wc >>> TOTAL_COUNT_SHIFT; |
1614 |
< |
int rc = wc & RUNNING_COUNT_MASK; |
2342 |
> |
int rc = getRunningThreadCount(); |
2343 |
|
int pc = parallelism; |
2344 |
< |
int rs = runState; |
2345 |
< |
int ac = rs & ACTIVE_COUNT_MASK; |
2344 |
> |
long c = ctl; |
2345 |
> |
int tc = pc + (short)(c >>> TC_SHIFT); |
2346 |
> |
int ac = pc + (int)(c >> AC_SHIFT); |
2347 |
> |
if (ac < 0) // ignore transient negative |
2348 |
> |
ac = 0; |
2349 |
> |
String level; |
2350 |
> |
if ((c & STOP_BIT) != 0) |
2351 |
> |
level = (tc == 0) ? "Terminated" : "Terminating"; |
2352 |
> |
else |
2353 |
> |
level = runState < 0 ? "Shutting down" : "Running"; |
2354 |
|
return super.toString() + |
2355 |
< |
"[" + runLevelToString(rs) + |
2355 |
> |
"[" + level + |
2356 |
|
", parallelism = " + pc + |
2357 |
|
", size = " + tc + |
2358 |
|
", active = " + ac + |
2363 |
|
"]"; |
2364 |
|
} |
2365 |
|
|
1630 |
– |
private static String runLevelToString(int s) { |
1631 |
– |
return ((s & TERMINATED) != 0 ? "Terminated" : |
1632 |
– |
((s & TERMINATING) != 0 ? "Terminating" : |
1633 |
– |
((s & SHUTDOWN) != 0 ? "Shutting down" : |
1634 |
– |
"Running"))); |
1635 |
– |
} |
1636 |
– |
|
2366 |
|
/** |
2367 |
|
* Initiates an orderly shutdown in which previously submitted |
2368 |
|
* tasks are executed, but no new tasks will be accepted. |
2377 |
|
*/ |
2378 |
|
public void shutdown() { |
2379 |
|
checkPermission(); |
2380 |
< |
advanceRunLevel(SHUTDOWN); |
2380 |
> |
enableShutdown(); |
2381 |
|
tryTerminate(false); |
2382 |
|
} |
2383 |
|
|
2399 |
|
*/ |
2400 |
|
public List<Runnable> shutdownNow() { |
2401 |
|
checkPermission(); |
2402 |
+ |
enableShutdown(); |
2403 |
|
tryTerminate(true); |
2404 |
|
return Collections.emptyList(); |
2405 |
|
} |
2410 |
|
* @return {@code true} if all tasks have completed following shut down |
2411 |
|
*/ |
2412 |
|
public boolean isTerminated() { |
2413 |
< |
return runState >= TERMINATED; |
2413 |
> |
long c = ctl; |
2414 |
> |
return ((c & STOP_BIT) != 0L && |
2415 |
> |
(short)(c >>> TC_SHIFT) == -parallelism); |
2416 |
|
} |
2417 |
|
|
2418 |
|
/** |
2420 |
|
* commenced but not yet completed. This method may be useful for |
2421 |
|
* debugging. A return of {@code true} reported a sufficient |
2422 |
|
* period after shutdown may indicate that submitted tasks have |
2423 |
< |
* ignored or suppressed interruption, causing this executor not |
2424 |
< |
* to properly terminate. |
2423 |
> |
* ignored or suppressed interruption, or are waiting for IO, |
2424 |
> |
* causing this executor not to properly terminate. (See the |
2425 |
> |
* advisory notes for class {@link ForkJoinTask} stating that |
2426 |
> |
* tasks should not normally entail blocking operations. But if |
2427 |
> |
* they do, they must abort them on interrupt.) |
2428 |
|
* |
2429 |
|
* @return {@code true} if terminating but not yet terminated |
2430 |
|
*/ |
2431 |
|
public boolean isTerminating() { |
2432 |
< |
return (runState & (TERMINATING|TERMINATED)) == TERMINATING; |
2432 |
> |
long c = ctl; |
2433 |
> |
return ((c & STOP_BIT) != 0L && |
2434 |
> |
(short)(c >>> TC_SHIFT) != -parallelism); |
2435 |
|
} |
2436 |
|
|
2437 |
|
/** |
2440 |
|
* @return {@code true} if this pool has been shut down |
2441 |
|
*/ |
2442 |
|
public boolean isShutdown() { |
2443 |
< |
return runState >= SHUTDOWN; |
2443 |
> |
return runState < 0; |
2444 |
|
} |
2445 |
|
|
2446 |
|
/** |
2456 |
|
*/ |
2457 |
|
public boolean awaitTermination(long timeout, TimeUnit unit) |
2458 |
|
throws InterruptedException { |
2459 |
+ |
long nanos = unit.toNanos(timeout); |
2460 |
+ |
final ReentrantLock lock = this.lock; |
2461 |
+ |
lock.lock(); |
2462 |
|
try { |
2463 |
< |
return termination.awaitAdvanceInterruptibly(0, timeout, unit) > 0; |
2464 |
< |
} catch(TimeoutException ex) { |
2465 |
< |
return false; |
2463 |
> |
for (;;) { |
2464 |
> |
if (isTerminated()) |
2465 |
> |
return true; |
2466 |
> |
if (nanos <= 0) |
2467 |
> |
return false; |
2468 |
> |
nanos = termination.awaitNanos(nanos); |
2469 |
> |
} |
2470 |
> |
} finally { |
2471 |
> |
lock.unlock(); |
2472 |
|
} |
2473 |
|
} |
2474 |
|
|
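Since the rewritten awaitTermination above is now the standard
lock-and-condition timed wait, callers interact with it exactly as with any
other ExecutorService; a typical shutdown sequence (illustrative only) is:

    import java.util.concurrent.TimeUnit;
    import jsr166y.ForkJoinPool;

    public class ShutdownDemo {
        public static void main(String[] args) throws InterruptedException {
            ForkJoinPool pool = new ForkJoinPool();
            pool.submit(new Runnable() {
                public void run() { System.out.println("task ran"); }
            });
            pool.shutdown();                          // no new tasks accepted
            if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
                pool.shutdownNow();                   // cancel queued tasks
                pool.awaitTermination(1, TimeUnit.SECONDS);
            }
        }
    }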
2480 |
|
* {@code isReleasable} must return {@code true} if blocking is |
2481 |
|
* not necessary. Method {@code block} blocks the current thread |
2482 |
|
* if necessary (perhaps internally invoking {@code isReleasable} |
2483 |
< |
* before actually blocking). The unusual methods in this API |
2484 |
< |
* accommodate synchronizers that may, but don't usually, block |
2485 |
< |
* for long periods. Similarly, they allow more efficient internal |
2486 |
< |
* handling of cases in which additional workers may be, but |
2487 |
< |
* usually are not, needed to ensure sufficient parallelism. |
2488 |
< |
* Toward this end, implementations of method {@code isReleasable} |
2489 |
< |
* must be amenable to repeated invocation. |
2483 |
> |
* before actually blocking). These actions are performed by any |
2484 |
> |
* thread invoking {@link ForkJoinPool#managedBlock}. The |
2485 |
> |
* unusual methods in this API accommodate synchronizers that may, |
2486 |
> |
* but don't usually, block for long periods. Similarly, they |
2487 |
> |
* allow more efficient internal handling of cases in which |
2488 |
> |
* additional workers may be, but usually are not, needed to |
2489 |
> |
* ensure sufficient parallelism. Toward this end, |
2490 |
> |
* implementations of method {@code isReleasable} must be amenable |
2491 |
> |
* to repeated invocation. |
2492 |
|
* |
2493 |
|
* <p>For example, here is a ManagedBlocker based on a |
2494 |
|
* ReentrantLock: |
2553 |
|
* |
2554 |
|
* <p>If the caller is not a {@link ForkJoinTask}, this method is |
2555 |
|
* behaviorally equivalent to |
2556 |
< |
* <pre> {@code |
2556 |
> |
* <pre> {@code |
2557 |
|
* while (!blocker.isReleasable()) |
2558 |
|
* if (blocker.block()) |
2559 |
|
* return; |
2568 |
|
public static void managedBlock(ManagedBlocker blocker) |
2569 |
|
throws InterruptedException { |
2570 |
|
Thread t = Thread.currentThread(); |
2571 |
< |
if (t instanceof ForkJoinWorkerThread) { |
2572 |
< |
ForkJoinWorkerThread w = (ForkJoinWorkerThread) t; |
2573 |
< |
w.pool.awaitBlocker(blocker); |
2574 |
< |
} |
2575 |
< |
else { |
2576 |
< |
do {} while (!blocker.isReleasable() && !blocker.block()); |
2571 |
> |
ForkJoinPool p = ((t instanceof ForkJoinWorkerThread) ? |
2572 |
> |
((ForkJoinWorkerThread)t).pool : null); |
2573 |
> |
while (!blocker.isReleasable()) { |
2574 |
> |
if (p == null || p.tryCompensate()) { |
2575 |
> |
try { |
2576 |
> |
do {} while (!blocker.isReleasable() && !blocker.block()); |
2577 |
> |
} finally { |
2578 |
> |
if (p != null) |
2579 |
> |
p.incrementActiveCount(); |
2580 |
> |
} |
2581 |
> |
break; |
2582 |
> |
} |
2583 |
|
} |
2584 |
|
} |
2585 |
|
|
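To complement the managedBlock rewrite above, here is a hedged sketch of a
ManagedBlocker in the same spirit as the ReentrantLock example the javadoc
refers to, wrapping a BlockingQueue take; QueueTaker is an illustrative name,
not part of the API:

    import java.util.concurrent.BlockingQueue;
    import jsr166y.ForkJoinPool;

    class QueueTaker<E> implements ForkJoinPool.ManagedBlocker {
        final BlockingQueue<E> queue;
        volatile E item = null;
        QueueTaker(BlockingQueue<E> q) { this.queue = q; }
        public boolean block() throws InterruptedException {
            if (item == null)
                item = queue.take();           // may park this thread
            return true;                       // no further blocking needed
        }
        public boolean isReleasable() {        // true if blocking is unnecessary
            return item != null || (item = queue.poll()) != null;
        }
        public E getItem() { return item; }
    }

    // usage, from code running in a ForkJoinPool:
    //   QueueTaker<String> taker = new QueueTaker<String>(sharedQueue);
    //   ForkJoinPool.managedBlock(taker);
    //   String s = taker.getItem();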
2596 |
|
} |
2597 |
|
|
2598 |
|
// Unsafe mechanics |
2599 |
< |
|
2600 |
< |
private static final sun.misc.Unsafe UNSAFE = getUnsafe(); |
2601 |
< |
private static final long workerCountsOffset = |
2602 |
< |
objectFieldOffset("workerCounts", ForkJoinPool.class); |
2603 |
< |
private static final long runStateOffset = |
2604 |
< |
objectFieldOffset("runState", ForkJoinPool.class); |
2605 |
< |
private static final long eventCountOffset = |
2606 |
< |
objectFieldOffset("eventCount", ForkJoinPool.class); |
2607 |
< |
private static final long eventWaitersOffset = |
2608 |
< |
objectFieldOffset("eventWaiters",ForkJoinPool.class); |
2609 |
< |
private static final long stealCountOffset = |
1856 |
< |
objectFieldOffset("stealCount",ForkJoinPool.class); |
1857 |
< |
private static final long spareWaitersOffset = |
1858 |
< |
objectFieldOffset("spareWaiters",ForkJoinPool.class); |
1859 |
< |
|
1860 |
< |
private static long objectFieldOffset(String field, Class<?> klazz) { |
2599 |
> |
private static final sun.misc.Unsafe U; |
2600 |
> |
private static final long CTL; |
2601 |
> |
private static final long RUNSTATE; |
2602 |
> |
private static final long PARKBLOCKER; |
2603 |
> |
|
2604 |
> |
static { |
2605 |
> |
poolNumberGenerator = new AtomicInteger(); |
2606 |
> |
modifyThreadPermission = new RuntimePermission("modifyThread"); |
2607 |
> |
defaultForkJoinWorkerThreadFactory = |
2608 |
> |
new DefaultForkJoinWorkerThreadFactory(); |
2609 |
> |
int s; |
2610 |
|
try { |
2611 |
< |
return UNSAFE.objectFieldOffset(klazz.getDeclaredField(field)); |
2612 |
< |
} catch (NoSuchFieldException e) { |
2613 |
< |
// Convert Exception to corresponding Error |
2614 |
< |
NoSuchFieldError error = new NoSuchFieldError(field); |
2615 |
< |
error.initCause(e); |
2616 |
< |
throw error; |
2611 |
> |
U = getUnsafe(); |
2612 |
> |
Class<?> k = ForkJoinPool.class; |
2613 |
> |
Class<?> tk = Thread.class; |
2614 |
> |
CTL = U.objectFieldOffset |
2615 |
> |
(k.getDeclaredField("ctl")); |
2616 |
> |
RUNSTATE = U.objectFieldOffset |
2617 |
> |
(k.getDeclaredField("runState")); |
2618 |
> |
PARKBLOCKER = U.objectFieldOffset |
2619 |
> |
(tk.getDeclaredField("parkBlocker")); |
2620 |
> |
} catch (Exception e) { |
2621 |
> |
throw new Error(e); |
2622 |
|
} |
2623 |
|
} |
2624 |
|
|
2649 |
|
} |
2650 |
|
} |
2651 |
|
} |
2652 |
+ |
|
2653 |
|
} |