
Comparing jsr166/src/jsr166y/ForkJoinWorkerThread.java (file contents):
Revision 1.21 by jsr166, Mon Jul 27 20:57:44 2009 UTC vs.
Revision 1.34 by dl, Fri Jun 4 14:37:54 2010 UTC

# Line 8 | Line 8 | package jsr166y;
8  
9   import java.util.concurrent.*;
10  
11 + import java.util.Random;
12   import java.util.Collection;
13 + import java.util.concurrent.locks.LockSupport;
14  
15   /**
16   * A thread managed by a {@link ForkJoinPool}.  This class is
17   * subclassable solely for the sake of adding functionality -- there
18 < * are no overridable methods dealing with scheduling or
19 < * execution. However, you can override initialization and termination
20 < * methods surrounding the main task processing loop.  If you do
21 < * create such a subclass, you will also need to supply a custom
22 < * ForkJoinWorkerThreadFactory to use it in a ForkJoinPool.
18 > * are no overridable methods dealing with scheduling or execution.
19 > * However, you can override initialization and termination methods
20 > * surrounding the main task processing loop.  If you do create such a
21 > * subclass, you will also need to supply a custom {@link
22 > * ForkJoinPool.ForkJoinWorkerThreadFactory} to use it in a {@code
23 > * ForkJoinPool}.
24   *
25   * @since 1.7
26   * @author Doug Lea
27   */
28   public class ForkJoinWorkerThread extends Thread {
29      /*
30 <     * Algorithm overview:
30 >     * Overview:
31       *
32 <     * 1. Work-Stealing: Work-stealing queues are special forms of
33 <     * Deques that support only three of the four possible
34 <     * end-operations -- push, pop, and deq (aka steal), and only do
35 <     * so under the constraints that push and pop are called only from
36 <     * the owning thread, while deq may be called from other threads.
37 <     * (If you are unfamiliar with them, you probably want to read
38 <     * Herlihy and Shavit's book "The Art of Multiprocessor
39 <     * programming", chapter 16 describing these in more detail before
40 <     * proceeding.)  The main work-stealing queue design is roughly
41 <     * similar to "Dynamic Circular Work-Stealing Deque" by David
42 <     * Chase and Yossi Lev, SPAA 2005
43 <     * (http://research.sun.com/scalable/pubs/index.html).  The main
44 <     * difference ultimately stems from gc requirements that we null
45 <     * out taken slots as soon as we can, to maintain as small a
46 <     * footprint as possible even in programs generating huge numbers
47 <     * of tasks. To accomplish this, we shift the CAS arbitrating pop
48 <     * vs deq (steal) from being on the indices ("base" and "sp") to
49 <     * the slots themselves (mainly via method "casSlotNull()"). So,
50 <     * both a successful pop and deq mainly entail CAS'ing a non-null
51 <     * slot to null.  Because we rely on CASes of references, we do
52 <     * not need tag bits on base or sp.  They are simple ints as used
53 <     * in any circular array-based queue (see for example ArrayDeque).
54 <     * Updates to the indices must still be ordered in a way that
55 <     * guarantees that (sp - base) > 0 means the queue is nonempty, but
56 <     * otherwise may err on the side of possibly making the queue
57 <     * appear nonempty when a push, pop, or deq have not fully
58 <     * committed. Note that this means that the deq operation,
59 <     * considered individually, is not wait-free. One thief cannot
60 <     * successfully continue until another in-progress one (or, if
61 <     * previously empty, a push) completes.  However, in the
62 <     * aggregate, we ensure at least probabilistic non-blockingness. If
63 <     * an attempted steal fails, a thief always chooses a different
32 >     * ForkJoinWorkerThreads are managed by ForkJoinPools and perform
33 >     * ForkJoinTasks. This class includes bookkeeping in support of
34 >     * worker activation, suspension, and lifecycle control described
35 >     * in more detail in the internal documentation of class
36 >     * ForkJoinPool. And as described further below, this class also
37 >     * includes special-cased support for some ForkJoinTask
38 >     * methods. But the main mechanics involve work-stealing:
39 >     *
40 >     * Work-stealing queues are special forms of Deques that support
41 >     * only three of the four possible end-operations -- push, pop,
42 >     * and deq (aka steal), under the further constraints that push
43 >     * and pop are called only from the owning thread, while deq may
44 >     * be called from other threads.  (If you are unfamiliar with
45 >     * them, you probably want to read Herlihy and Shavit's book "The
46 >     * Art of Multiprocessor programming", chapter 16 describing these
47 >     * in more detail before proceeding.)  The main work-stealing
48 >     * queue design is roughly similar to those in the papers "Dynamic
49 >     * Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005
50 >     * (http://research.sun.com/scalable/pubs/index.html) and
51 >     * "Idempotent work stealing" by Michael, Saraswat, and Vechev,
52 >     * PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186).
53 >     * The main differences ultimately stem from gc requirements that
54 >     * we null out taken slots as soon as we can, to maintain as small
55 >     * a footprint as possible even in programs generating huge
56 >     * numbers of tasks. To accomplish this, we shift the CAS
57 >     * arbitrating pop vs deq (steal) from being on the indices
58 >     * ("base" and "sp") to the slots themselves (mainly via method
59 >     * "casSlotNull()"). So, both a successful pop and deq mainly
60 >     * entail a CAS of a slot from non-null to null.  Because we rely
61 >     * on CASes of references, we do not need tag bits on base or sp.
62 >     * They are simple ints as used in any circular array-based queue
63 >     * (see for example ArrayDeque).  Updates to the indices must
64 >     * still be ordered in a way that guarantees that sp == base means
65 >     * the queue is empty, but otherwise may err on the side of
66 >     * possibly making the queue appear nonempty when a push, pop, or
67 >     * deq have not fully committed. Note that this means that the deq
68 >     * operation, considered individually, is not wait-free. One thief
69 >     * cannot successfully continue until another in-progress one (or,
70 >     * if previously empty, a push) completes.  However, in the
71 >     * aggregate, we ensure at least probabilistic non-blockingness.
72 >     * If an attempted steal fails, a thief always chooses a different
73       * random victim target to try next. So, in order for one thief to
74       * progress, it suffices for any in-progress deq or new push on
75       * any empty queue to complete. One reason this works well here is
76       * that apparently-nonempty often means soon-to-be-stealable,
77 <     * which gives threads a chance to activate if necessary before
78 <     * stealing (see below).
77 >     * which gives threads a chance to set activation status if
78 >     * necessary before stealing.
79 >     *
80 >     * This approach also enables support for "async mode" where local
81 >     * task processing is in FIFO, not LIFO order; simply by using a
82 >     * version of deq rather than pop when locallyFifo is true (as set
83 >     * by the ForkJoinPool).  This allows use in message-passing
84 >     * frameworks in which tasks are never joined.
85       *
86       * Efficient implementation of this approach currently relies on
87       * an uncomfortable amount of "Unsafe" mechanics. To maintain
88       * correct orderings, reads and writes of variable base require
89 <     * volatile ordering.  Variable sp does not require volatile write
90 <     * but needs cheaper store-ordering on writes.  Because they are
91 <     * protected by volatile base reads, reads of the queue array and
92 <     * its slots do not need volatile load semantics, but writes (in
93 <     * push) require store order and CASes (in pop and deq) require
94 <     * (volatile) CAS semantics. Since these combinations aren't
95 <     * supported using ordinary volatiles, the only way to accomplish
96 <     * these efficiently is to use direct Unsafe calls. (Using external
89 >     * volatile ordering.  Variable sp does not require volatile
90 >     * writes but still needs store-ordering, which we accomplish by
91 >     * pre-incrementing sp before filling the slot with an ordered
92 >     * store.  (Pre-incrementing also enables backouts used in
93 >     * scanWhileJoining.)  Because they are protected by volatile base
94 >     * reads, reads of the queue array and its slots by other threads
95 >     * do not need volatile load semantics, but writes (in push)
96 >     * require store order and CASes (in pop and deq) require
97 >     * (volatile) CAS semantics.  (Michael, Saraswat, and Vechev's
98 >     * algorithm has similar properties, but without support for
99 >     * nulling slots.)  Since these combinations aren't supported
100 >     * using ordinary volatiles, the only way to accomplish these
101 >     * efficiently is to use direct Unsafe calls. (Using external
102       * AtomicIntegers and AtomicReferenceArrays for the indices and
103       * array is significantly slower because of memory locality and
104 <     * indirection effects.) Further, performance on most platforms is
105 <     * very sensitive to placement and sizing of the (resizable) queue
106 <     * array.  Even though these queues don't usually become all that
107 <     * big, the initial size must be large enough to counteract cache
104 >     * indirection effects.)
105 >     *
106 >     * Further, performance on most platforms is very sensitive to
107 >     * placement and sizing of the (resizable) queue array.  Even
108 >     * though these queues don't usually become all that big, the
109 >     * initial size must be large enough to counteract cache
110       * contention effects across multiple queues (especially in the
111       * presence of GC cardmarking). Also, to improve thread-locality,
112 <     * queues are currently initialized immediately after the thread
113 <     * gets the initial signal to start processing tasks.  However,
114 <     * all queue-related methods except pushTask are written in a way
115 <     * that allows them to instead be lazily allocated and/or disposed
116 <     * of when empty. All together, these low-level implementation
117 <     * choices produce as much as a factor of 4 performance
118 <     * improvement compared to naive implementations, and enable the
119 <     * processing of billions of tasks per second, sometimes at the
120 <     * expense of ugliness.
121 <     *
122 <     * 2. Run control: The primary run control is based on a global
123 <     * counter (activeCount) held by the pool. It uses an algorithm
124 <     * similar to that in Herlihy and Shavit section 17.6 to cause
125 <     * threads to eventually block when all threads declare they are
126 <     * inactive. (See variable "scans".)  For this to work, threads
127 <     * must be declared active when executing tasks, and before
128 <     * stealing a task. They must be inactive before blocking on the
129 <     * Pool Barrier (awaiting a new submission or other Pool
130 <     * event). In between, there is some free play which we take
131 <     * advantage of to avoid contention and rapid flickering of the
132 <     * global activeCount: If inactive, we activate only if a victim
133 <     * queue appears to be nonempty (see above).  Similarly, a thread
134 <     * tries to inactivate only after a full scan of other threads.
110 <     * The net effect is that contention on activeCount is rarely a
111 <     * measurable performance issue. (There are also a few other cases
112 <     * where we scan for work rather than retry/block upon
113 <     * contention.)
114 <     *
115 <     * 3. Selection control. We maintain policy of always choosing to
116 <     * run local tasks rather than stealing, and always trying to
117 <     * steal tasks before trying to run a new submission. All steals
118 <     * are currently performed in randomly-chosen deq-order. It may be
119 <     * worthwhile to bias these with locality / anti-locality
120 <     * information, but doing this well probably requires more
121 <     * lower-level information from JVMs than currently provided.
112 >     * queues are initialized after starting.  All together, these
113 >     * low-level implementation choices produce as much as a factor of
114 >     * 4 performance improvement compared to naive implementations,
115 >     * and enable the processing of billions of tasks per second,
116 >     * sometimes at the expense of ugliness.
117 >     */
118 >
119 >    /**
120 >     * Generator for initial random seeds for random victim
121 >     * selection. This is used only to create initial seeds. Random
122 >     * steals use a cheaper xorshift generator per steal attempt. We
123 >     * expect only rare contention on seedGenerator, so just use a
124 >     * plain Random.
125 >     */
126 >    private static final Random seedGenerator = new Random();
127 >
128 >    /**
129 >     * The timeout value for suspending spares. Spare workers that
130 >     * remain unsignalled for more than this time may be trimmed
131 >     * (killed and removed from pool).  Since our goal is to avoid
132 >     * long-term thread buildup, the exact value of timeout does not
133 >     * matter too much so long as it avoids most false-alarm timeouts
134 >     * under GC stalls or momentarily high system load.
135       */
136 +    private static final long SPARE_KEEPALIVE_NANOS =
137 +        5L * 1000L * 1000L * 1000L; // 5 secs
138  
139      /**
140       * Capacity of work-stealing queue array upon initialization.
141 <     * Must be a power of two. Initial size must be at least 2, but is
141 >     * Must be a power of two. Initial size must be at least 4, but is
142       * padded to minimize cache effects.
143       */
144      private static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
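
To make the overview concrete, here is a minimal sketch of the pop-vs-deq arbitration described above, written against a plain AtomicReferenceArray rather than the Unsafe intrinsics the real class relies on. The class name SimpleWorkQueue and the fixed capacity are illustrative assumptions, not jsr166y API: owner pop and thief deq both succeed only by CASing a non-null slot to null, and base and sp may lag exactly as the comment permits.

    import java.util.concurrent.atomic.AtomicReferenceArray;

    // Illustrative only: the queue discipline described above, minus
    // resizing, padding, and the ordered-store optimizations.
    final class SimpleWorkQueue<T> {
        final AtomicReferenceArray<T> slots =
            new AtomicReferenceArray<T>(1 << 13);  // power of two
        volatile int base;  // next slot to deq (steal) from
        volatile int sp;    // next slot to push to; owner writes only

        void push(T t) {                  // owner thread only
            int s = sp;
            slots.set(s & (slots.length() - 1), t);
            sp = s + 1;                   // an ordered store in the real code
        }

        T pop() {                         // owner thread only (LIFO end)
            int s = sp;
            if (s != base) {
                int i = (s - 1) & (slots.length() - 1);
                T t = slots.get(i);
                if (t != null && slots.compareAndSet(i, t, null)) {
                    sp = s - 1;
                    return t;
                }
            }
            return null;                  // empty, or lost the race to a thief
        }

        T deq() {                         // any thread (steal, FIFO end)
            int b = base;
            if (b != sp) {
                int i = b & (slots.length() - 1);
                T t = slots.get(i);
                if (t != null && slots.compareAndSet(i, t, null)) {
                    base = b + 1;         // commit the take
                    return t;
                }
            }
            return null;                  // empty or contended: try another victim
        }
    }

An owner that calls deq() on its own queue instead of pop() gets exactly the FIFO "async mode" the overview mentions.
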
# Line 143 | Line 158 | public class ForkJoinWorkerThread extend
158  
159      /**
160       * The work-stealing queue array. Size must be a power of two.
161 <     * Initialized when thread starts, to improve memory locality.
161 >     * Initialized in onStart, to improve memory locality.
162       */
163      private ForkJoinTask<?>[] queue;
164  
165      /**
151     * Index (mod queue.length) of next queue slot to push to or pop
152     * from. It is written only by owner thread, via ordered store.
153     * Both sp and base are allowed to wrap around on overflow, but
154     * (sp - base) still estimates size.
155     */
156    private volatile int sp;
157
158    /**
166       * Index (mod queue.length) of least valid queue slot, which is
167       * always the next position to steal from if nonempty.
168       */
169      private volatile int base;
170  
171      /**
172 <     * Activity status. When true, this worker is considered active.
173 <     * Must be false upon construction. It must be true when executing
174 <     * tasks, and BEFORE stealing a task. It must be false before
175 <     * calling pool.sync.
176 <     */
177 <    private boolean active;
172 >     * Index (mod queue.length) of next queue slot to push to or pop
173 >     * from. It is written only by owner thread, and accessed by other
174 >     * threads only after reading (volatile) base.  Both sp and base
175 >     * are allowed to wrap around on overflow, but (sp - base) still
176 >     * estimates size.
177 >     */
178 >    private int sp;
179  
180      /**
181 <     * Run state of this worker. Supports simple versions of the usual
182 <     * shutdown/shutdownNow control.
181 >     * Run state of this worker. In addition to the usual run levels,
182 >     * tracks if this worker is suspended as a spare, and if it was
183 >     * killed (trimmed) while suspended. However, "active" status is
184 >     * maintained separately.
185       */
186      private volatile int runState;
187  
188 +    private static final int TERMINATING = 0x01;
189 +    private static final int TERMINATED  = 0x02;
190 +    private static final int SUSPENDED   = 0x04; // inactive spare
191 +    private static final int TRIMMED     = 0x08; // killed while suspended
192 +
193 +    /**
194 +     * Number of LockSupport.park calls to block this thread for
195 +     * suspension or event waits. Used for internal instrumentation;
196 +     * currently not exported but included because volatile write upon
197 +     * park also provides a workaround for a JVM bug.
198 +     */
199 +    private volatile int parkCount;
200 +
201 +    /**
202 +     * Number of steals, transferred to pool and reset in pool
203 +     * callbacks when idle. Accessed directly by pool.
204 +     */
205 +    int stealCount;
206 +
207      /**
208       * Seed for random number generator for choosing steal victims.
209 <     * Uses Marsaglia xorshift. Must be nonzero upon initialization.
209 >     * Uses Marsaglia xorshift. Must be initialized as nonzero.
210       */
211      private int seed;
212  
213      /**
214 <     * Number of steals, transferred to pool when idle
214 >     * Activity status. When true, this worker is considered active.
215 >     * Accessed directly by pool.  Must be false upon construction.
216       */
217 <    private int stealCount;
217 >    boolean active;
218 >
219 >    /**
220 >     * True if using local fifo, not default lifo, for local polling.
221 >     * Shadows value from ForkJoinPool, which resets it if changed
222 >     * pool-wide.
223 >     */
224 >    private boolean locallyFifo;
225  
226      /**
227       * Index of this worker in pool array. Set once by pool before
228 <     * running, and accessed directly by pool during cleanup etc.
228 >     * running, and accessed directly by pool to locate this worker in
229 >     * its workers array.
230       */
231      int poolIndex;
232  
233      /**
234 <     * The last barrier event waited for. Accessed in pool callback
235 <     * methods, but only by current thread.
234 >     * The last pool event waited for. Accessed only by pool in
235 >     * callback methods invoked within this thread.
236       */
237 <    long lastEventCount;
237 >    int lastEventCount;
238  
239      /**
240 <     * True if use local fifo, not default lifo, for local polling
240 >     * Encoded index and event count of next event waiter. Used only
241 >     * by ForkJoinPool for managing event waiters.
242       */
243 <    private boolean locallyFifo;
243 >    volatile long nextWaiter;
244  
245      /**
246       * Creates a ForkJoinWorkerThread operating in the given pool.
# Line 212 | Line 251 | public class ForkJoinWorkerThread extend
251      protected ForkJoinWorkerThread(ForkJoinPool pool) {
252          if (pool == null) throw new NullPointerException();
253          this.pool = pool;
254 <        // Note: poolIndex is set by pool during construction
255 <        // Remaining initialization is deferred to onStart
254 >        // To avoid exposing construction details to subclasses,
255 >        // remaining initialization is in start() and onStart()
256 >    }
257 >
258 >    /**
259 >     * Performs additional initialization and starts this thread.
260 >     */
261 >    final void start(int poolIndex, boolean locallyFifo,
262 >                     UncaughtExceptionHandler ueh) {
263 >        this.poolIndex = poolIndex;
264 >        this.locallyFifo = locallyFifo;
265 >        if (ueh != null)
266 >            setUncaughtExceptionHandler(ueh);
267 >        setDaemon(true);
268 >        start();
269      }
270  
271 <    // Public access methods
271 >    // Public/protected methods
272  
273      /**
274       * Returns the pool hosting this thread.
# Line 241 | Line 293 | public class ForkJoinWorkerThread extend
293      }
294  
295      /**
296 <     * Establishes local first-in-first-out scheduling mode for forked
297 <     * tasks that are never joined.
298 <     *
299 <     * @param async if true, use locally FIFO scheduling
296 >     * Initializes internal state after construction but before
297 >     * processing any tasks. If you override this method, you must
298 >     * invoke super.onStart() at the beginning of the method.
299 >     * Initialization requires care: Most fields must have legal
300 >     * default values, to ensure that attempted accesses from other
301 >     * threads work correctly even before this thread starts
302 >     * processing tasks.
303       */
304 <    void setAsyncMode(boolean async) {
305 <        locallyFifo = async;
306 <    }
252 <
253 <    // Runstate management
254 <
255 <    // Runstate values. Order matters
256 <    private static final int RUNNING     = 0;
257 <    private static final int SHUTDOWN    = 1;
258 <    private static final int TERMINATING = 2;
259 <    private static final int TERMINATED  = 3;
304 >    protected void onStart() {
305 >        int rs = seedGenerator.nextInt();
306 >        seed = rs == 0? 1 : rs; // seed must be nonzero
307  
308 <    final boolean isShutdown()    { return runState >= SHUTDOWN;  }
309 <    final boolean isTerminating() { return runState >= TERMINATING;  }
310 <    final boolean isTerminated()  { return runState == TERMINATED; }
311 <    final boolean shutdown()      { return transitionRunStateTo(SHUTDOWN); }
265 <    final boolean shutdownNow()   { return transitionRunStateTo(TERMINATING); }
308 >        // Allocate name string and queue array in this thread
309 >        String pid = Integer.toString(pool.getPoolNumber());
310 >        String wid = Integer.toString(poolIndex);
311 >        setName("ForkJoinPool-" + pid + "-worker-" + wid);
312  
313 <    /**
268 <     * Transitions to at least the given state.
269 <     *
270 <     * @return {@code true} if not already at least at given state
271 <     */
272 <    private boolean transitionRunStateTo(int state) {
273 <        for (;;) {
274 <            int s = runState;
275 <            if (s >= state)
276 <                return false;
277 <            if (UNSAFE.compareAndSwapInt(this, runStateOffset, s, state))
278 <                return true;
279 <        }
280 <    }
281 <
282 <    /**
283 <     * Tries to set status to active; fails on contention.
284 <     */
285 <    private boolean tryActivate() {
286 <        if (!active) {
287 <            if (!pool.tryIncrementActiveCount())
288 <                return false;
289 <            active = true;
290 <        }
291 <        return true;
313 >        queue = new ForkJoinTask<?>[INITIAL_QUEUE_CAPACITY];
314      }
315  
316      /**
317 <     * Tries to set status to inactive; fails on contention.
317 >     * Performs cleanup associated with termination of this worker
318 >     * thread.  If you override this method, you must invoke
319 >     * {@code super.onTermination} at the end of the overridden method.
320 >     *
321 >     * @param exception the exception causing this thread to abort due
322 >     * to an unrecoverable error, or {@code null} if completed normally
323       */
324 <    private boolean tryInactivate() {
325 <        if (active) {
326 <            if (!pool.tryDecrementActiveCount())
327 <                return false;
328 <            active = false;
324 >    protected void onTermination(Throwable exception) {
325 >        try {
326 >            cancelTasks();
327 >            setTerminated();
328 >            pool.workerTerminated(this);
329 >        } catch (Throwable ex) {        // Shouldn't ever happen
330 >            if (exception == null)      // but if so, at least rethrown
331 >                exception = ex;
332 >        } finally {
333 >            if (exception != null)
334 >                UNSAFE.throwException(exception);
335          }
303        return true;
336      }
337  
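
As a usage sketch of the two lifecycle hooks above: the subclass and factory names below are invented for illustration; only the overridden methods and the ForkJoinPool.ForkJoinWorkerThreadFactory interface come from the library.

    import jsr166y.ForkJoinPool;
    import jsr166y.ForkJoinWorkerThread;

    // A custom worker that brackets the main task processing loop,
    // honoring the onStart/onTermination contracts documented above.
    class LoggingWorker extends ForkJoinWorkerThread {
        protected LoggingWorker(ForkJoinPool pool) {
            super(pool);
        }
        @Override protected void onStart() {
            super.onStart();                 // must be invoked first
            System.out.println(getName() + " starting");
        }
        @Override protected void onTermination(Throwable exception) {
            System.out.println(getName() + " exiting: " + exception);
            super.onTermination(exception);  // must be invoked last
        }
    }

    // The factory a ForkJoinPool needs in order to use the subclass.
    class LoggingWorkerFactory
            implements ForkJoinPool.ForkJoinWorkerThreadFactory {
        public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
            return new LoggingWorker(pool);
        }
    }

A pool is then built with something like new ForkJoinPool(parallelism, new LoggingWorkerFactory()), assuming the two-argument constructor of this jsr166y vintage.
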
338      /**
307     * Computes next value for random victim probe.  Scans don't
308     * require a very high quality generator, but also not a crummy
309     * one.  Marsaglia xor-shift is cheap and works well.
310     */
311    private static int xorShift(int r) {
312        r ^= r << 1;
313        r ^= r >>> 3;
314        r ^= r << 10;
315        return r;
316    }
317
318    // Lifecycle methods
319
320    /**
339       * This method is required to be public, but should never be
340       * called explicitly. It performs the main run loop to execute
341       * ForkJoinTasks.
# Line 326 | Line 344 | public class ForkJoinWorkerThread extend
344          Throwable exception = null;
345          try {
346              onStart();
329            pool.sync(this); // await first pool event
347              mainLoop();
348          } catch (Throwable ex) {
349              exception = ex;
# Line 335 | Line 352 | public class ForkJoinWorkerThread extend
352          }
353      }
354  
355 +    // helpers for run()
356 +
357      /**
358 <     * Executes tasks until shut down.
358 >     * Finds and executes tasks and checks status while running.
359       */
360      private void mainLoop() {
361 <        while (!isShutdown()) {
362 <            ForkJoinTask<?> t = pollTask();
363 <            if (t != null || (t = pollSubmission()) != null)
364 <                t.quietlyExec();
365 <            else if (tryInactivate())
366 <                pool.sync(this);
361 >        boolean ran = false;      // true if ran task in last loop iter
362 >        boolean prevRan = false;  // true if ran on last or previous step
363 >        ForkJoinPool p = pool;
364 >        for (;;) {
365 >            p.preStep(this, prevRan);
366 >            if (runState != 0)
367 >                return;
368 >            ForkJoinTask<?> t; // try to get and run stolen or submitted task
369 >            if ((t = scan()) != null || (t = pollSubmission()) != null) {
370 >                t.tryExec();
371 >                if (base != sp)
372 >                    runLocalTasks();
373 >                prevRan = ran = true;
374 >            }
375 >            else {
376 >                prevRan = ran;
377 >                ran = false;
378 >            }
379          }
380      }
381  
382      /**
383 <     * Initializes internal state after construction but before
384 <     * processing any tasks. If you override this method, you must
354 <     * invoke super.onStart() at the beginning of the method.
355 <     * Initialization requires care: Most fields must have legal
356 <     * default values, to ensure that attempted accesses from other
357 <     * threads work correctly even before this thread starts
358 <     * processing tasks.
383 >     * Runs local tasks until queue is empty or shut down.  Call only
384 >     * while active.
385       */
386 <    protected void onStart() {
387 <        // Allocate while starting to improve chances of thread-local
388 <        // isolation
389 <        queue = new ForkJoinTask<?>[INITIAL_QUEUE_CAPACITY];
390 <        // Initial value of seed need not be especially random but
391 <        // should differ across workers and must be nonzero
392 <        int p = poolIndex + 1;
393 <        seed = p + (p << 8) + (p << 16) + (p << 24); // spread bits
386 >    private void runLocalTasks() {
387 >        while (runState == 0) {
388 >            ForkJoinTask<?> t = locallyFifo? locallyDeqTask() : popTask();
389 >            if (t != null)
390 >                t.tryExec();
391 >            else if (base == sp)
392 >                break;
393 >        }
394      }
395  
396      /**
397 <     * Performs cleanup associated with termination of this worker
372 <     * thread.  If you override this method, you must invoke
373 <     * {@code super.onTermination} at the end of the overridden method.
397 >     * If a submission exists, tries to activate and take it.
398       *
399 <     * @param exception the exception causing this thread to abort due
376 <     * to an unrecoverable error, or {@code null} if completed normally
399 >     * @return a task, if available
400       */
401 <    protected void onTermination(Throwable exception) {
402 <        // Execute remaining local tasks unless aborting or terminating
403 <        while (exception == null &&  !pool.isTerminating() && base != sp) {
404 <            try {
405 <                ForkJoinTask<?> t = popTask();
406 <                if (t != null)
384 <                    t.quietlyExec();
385 <            } catch (Throwable ex) {
386 <                exception = ex;
401 >    private ForkJoinTask<?> pollSubmission() {
402 >        ForkJoinPool p = pool;
403 >        while (p.hasQueuedSubmissions()) {
404 >            if (active || (active = p.tryIncrementActiveCount())) {
405 >                ForkJoinTask<?> t = p.pollSubmission();
406 >                return t != null ? t : scan(); // if missed, rescan
407              }
408          }
409 <        // Cancel other tasks, transition status, notify pool, and
390 <        // propagate exception to uncaught exception handler
391 <        try {
392 <            do {} while (!tryInactivate()); // ensure inactive
393 <            cancelTasks();
394 <            runState = TERMINATED;
395 <            pool.workerTerminated(this);
396 <        } catch (Throwable ex) {        // Shouldn't ever happen
397 <            if (exception == null)      // but if so, at least rethrown
398 <                exception = ex;
399 <        } finally {
400 <            if (exception != null)
401 <                ForkJoinTask.rethrowException(exception);
402 <        }
409 >        return null;
410      }
411  
412 <    // Intrinsics-based support for queue operations.
413 <
414 <    /**
415 <     * Adds in store-order the given task at given slot of q to null.
416 <     * Caller must ensure q is non-null and index is in range.
412 >    /*
413 >     * Intrinsics-based atomic writes for queue slots. These are
414 >     * basically the same as methods in AtomicReferenceArray, but
415 >     * specialized for (1) ForkJoinTask elements (2) requirement that
416 >     * nullness and bounds checks have already been performed by
417 >     * callers and (3) effective offsets are known not to overflow
418 >     * from int to long (because of MAXIMUM_QUEUE_CAPACITY). We don't
419 >     * need a corresponding version for reads: plain array reads are OK
420 >     * because they are protected by other volatile reads and are
421 >     * confirmed by CASes.
422 >     *
423 >     * Most uses don't actually call these methods, but instead contain
424 >     * inlined forms that enable more predictable optimization.  We
425 >     * don't define the version of write used in pushTask at all, but
426 >     * instead inline there a store-fenced array slot write.
427       */
411    private static void setSlot(ForkJoinTask<?>[] q, int i,
412                                ForkJoinTask<?> t) {
413        UNSAFE.putOrderedObject(q, (i << qShift) + qBase, t);
414    }
428  
429      /**
430 <     * CAS given slot of q to null. Caller must ensure q is non-null
431 <     * and index is in range.
430 >     * CASes slot i of array q from t to null. Caller must ensure q is
431 >     * non-null and index is in range.
432       */
433 <    private static boolean casSlotNull(ForkJoinTask<?>[] q, int i,
434 <                                       ForkJoinTask<?> t) {
433 >    private static final boolean casSlotNull(ForkJoinTask<?>[] q, int i,
434 >                                             ForkJoinTask<?> t) {
435          return UNSAFE.compareAndSwapObject(q, (i << qShift) + qBase, t, null);
436      }
437  
438      /**
439 <     * Sets sp in store-order.
439 >     * Performs a volatile write of the given task at given slot of
440 >     * array q.  Caller must ensure q is non-null and index is in
441 >     * range. This method is used only during resets and backouts.
442       */
443 <    private void storeSp(int s) {
444 <        UNSAFE.putOrderedInt(this, spOffset, s);
443 >    private static final void writeSlot(ForkJoinTask<?>[] q, int i,
444 >                                              ForkJoinTask<?> t) {
445 >        UNSAFE.putObjectVolatile(q, (i << qShift) + qBase, t);
446      }
447  
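
The (i << qShift) + qBase expression used throughout these methods is hand-computed array addressing: qBase is the byte offset of element 0 and qShift is log2 of the per-element scale. A standalone sketch of the same arithmetic for a plain Object[], obtaining sun.misc.Unsafe reflectively as application code must (the class and method names are illustrative):

    import java.lang.reflect.Field;
    import sun.misc.Unsafe;

    // Illustrative: how a slot index becomes a byte offset.
    final class SlotOffsets {
        static final Unsafe U;
        static final long BASE;   // offset of element 0
        static final int  SHIFT;  // log2 of the element scale
        static {
            try {
                Field f = Unsafe.class.getDeclaredField("theUnsafe");
                f.setAccessible(true);
                U = (Unsafe) f.get(null);
            } catch (Exception e) {
                throw new Error(e);
            }
            BASE = U.arrayBaseOffset(Object[].class);
            int scale = U.arrayIndexScale(Object[].class);
            if ((scale & (scale - 1)) != 0)  // must be a power of two
                throw new Error("data type scale not a power of two");
            SHIFT = 31 - Integer.numberOfLeadingZeros(scale);
        }

        // The shape of casSlotNull above, for a plain Object[].
        static boolean casToNull(Object[] q, int i, Object expected) {
            return U.compareAndSwapObject(q, ((long) i << SHIFT) + BASE,
                                          expected, null);
        }
    }
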
448 <    // Main queue methods
448 >    // queue methods
449  
450      /**
451 <     * Pushes a task. Called only by current thread.
451 >     * Pushes a task. Call only from this thread.
452       *
453       * @param t the task. Caller must ensure non-null.
454       */
455      final void pushTask(ForkJoinTask<?> t) {
456          ForkJoinTask<?>[] q = queue;
457 <        int mask = q.length - 1;
458 <        int s = sp;
459 <        setSlot(q, s & mask, t);
460 <        storeSp(++s);
461 <        if ((s -= base) == 1)
462 <            pool.signalWork();
463 <        else if (s >= mask)
448 <            growQueue();
457 >        int mask = q.length - 1; // implicit assert q != null
458 >        int s = sp++;            // ok to increment sp before slot write
459 >        UNSAFE.putOrderedObject(q, ((s & mask) << qShift) + qBase, t);
460 >        if ((s -= base) == 0)
461 >            pool.signalWork();   // was empty
462 >        else if (s == mask)
463 >            growQueue();         // is full
464      }
465  
466      /**
467       * Tries to take a task from the base of the queue, failing if
468 <     * either empty or contended.
468 >     * empty or contended. Note: Specializations of this code appear
469 >     * in scan and scanWhileJoining.
470       *
471       * @return a task, or null if none or contended
472       */
473      final ForkJoinTask<?> deqTask() {
474          ForkJoinTask<?> t;
475          ForkJoinTask<?>[] q;
476 <        int i;
477 <        int b;
462 <        if (sp != (b = base) &&
476 >        int b, i;
477 >        if ((b = base) != sp &&
478              (q = queue) != null && // must read q after b
479              (t = q[i = (q.length - 1) & b]) != null &&
480 <            casSlotNull(q, i, t)) {
480 >            UNSAFE.compareAndSwapObject(q, (i << qShift) + qBase, t, null)) {
481              base = b + 1;
482              return t;
483          }
# Line 470 | Line 485 | public class ForkJoinWorkerThread extend
485      }
486  
487      /**
488 <     * Returns a popped task, or null if empty. Ensures active status
489 <     * if non-null. Called only by current thread.
488 >     * Tries to take a task from the base of own queue. Assumes active
489 >     * status.  Called only by current thread.
490 >     *
491 >     * @return a task, or null if none
492 >     */
493 >    final ForkJoinTask<?> locallyDeqTask() {
494 >        ForkJoinTask<?>[] q = queue;
495 >        if (q != null) {
496 >            ForkJoinTask<?> t;
497 >            int b, i;
498 >            while (sp != (b = base)) {
499 >                if ((t = q[i = (q.length - 1) & b]) != null &&
500 >                    UNSAFE.compareAndSwapObject(q, (i << qShift) + qBase,
501 >                                                t, null)) {
502 >                    base = b + 1;
503 >                    return t;
504 >                }
505 >            }
506 >        }
507 >        return null;
508 >    }
509 >
510 >    /**
511 >     * Returns a popped task, or null if empty. Assumes active status.
512 >     * Called only by current thread. (Note: a specialization of this
513 >     * code appears in popWhileJoining.)
514       */
515      final ForkJoinTask<?> popTask() {
516 <        int s = sp;
517 <        while (s != base) {
518 <            if (tryActivate()) {
519 <                ForkJoinTask<?>[] q = queue;
520 <                int mask = q.length - 1;
521 <                int i = (s - 1) & mask;
522 <                ForkJoinTask<?> t = q[i];
523 <                if (t == null || !casSlotNull(q, i, t))
485 <                    break;
486 <                storeSp(s - 1);
516 >        int s;
517 >        ForkJoinTask<?>[] q;
518 >        if (base != (s = sp) && (q = queue) != null) {
519 >            int i = (q.length - 1) & --s;
520 >            ForkJoinTask<?> t = q[i];
521 >            if (t != null && UNSAFE.compareAndSwapObject
522 >                (q, (i << qShift) + qBase, t, null)) {
523 >                sp = s;
524                  return t;
525              }
526          }
# Line 491 | Line 528 | public class ForkJoinWorkerThread extend
528      }
529  
530      /**
531 <     * Specialized version of popTask to pop only if
532 <     * topmost element is the given task. Called only
533 <     * by current thread while active.
531 >     * Specialized version of popTask to pop only if topmost element
532 >     * is the given task. Called only by current thread while
533 >     * active.
534       *
535       * @param t the task. Caller must ensure non-null.
536       */
537      final boolean unpushTask(ForkJoinTask<?> t) {
538 <        ForkJoinTask<?>[] q = queue;
539 <        int mask = q.length - 1;
540 <        int s = sp - 1;
541 <        if (casSlotNull(q, s & mask, t)) {
542 <            storeSp(s);
538 >        int s;
539 >        ForkJoinTask<?>[] q;
540 >        if (base != (s = sp) && (q = queue) != null &&
541 >            UNSAFE.compareAndSwapObject
542 >            (q, (((q.length - 1) & --s) << qShift) + qBase, t, null)) {
543 >            sp = s;
544              return true;
545          }
546          return false;
547      }
548  
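
unpushTask is the machinery behind ForkJoinTask.tryUnfork(): a just-forked task can be taken back and run in place, but only while it is still the topmost element of the owner's queue. A hedged usage sketch (the helper name is invented):

    import jsr166y.ForkJoinTask;

    // Illustrative helper: join a previously forked task, preferring to
    // pop it back (via unpushTask) and run it locally if not yet stolen.
    final class Joins {
        static <T> T joinOrRunLocally(ForkJoinTask<T> t) {
            if (t.tryUnfork())       // succeeds only while still topmost
                return t.invoke();   // run it in the current thread
            return t.join();         // otherwise await its completion
        }
    }
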
549      /**
550 <     * Returns next task.
550 >     * Returns the next task, or null if empty or contended.
551       */
552      final ForkJoinTask<?> peekTask() {
553          ForkJoinTask<?>[] q = queue;
# Line 542 | Line 580 | public class ForkJoinWorkerThread extend
580              ForkJoinTask<?> t = oldQ[oldIndex];
581              if (t != null && !casSlotNull(oldQ, oldIndex, t))
582                  t = null;
583 <            setSlot(newQ, b & newMask, t);
583 >            writeSlot(newQ, b & newMask, t);
584          } while (++b != bf);
585          pool.signalWork();
586      }
587  
588      /**
589 +     * Computes next value for random victim probe in scan().  Scans
590 +     * don't require a very high quality generator, but also not a
591 +     * crummy one.  Marsaglia xor-shift is cheap and works well enough.
592 +     * Note: This is manually inlined in scan()
593 +     */
594 +    private static final int xorShift(int r) {
595 +        r ^= r << 13;
596 +        r ^= r >>> 17;
597 +        return r ^ (r << 5);
598 +    }
599 +
600 +    /**
601       * Tries to steal a task from another worker. Starts at a random
602       * index of workers array, and probes workers until finding one
603       * with non-empty queue or finding that all are empty.  It
604       * randomly selects the first n probes. If these are empty, it
605 <     * resorts to a full circular traversal, which is necessary to
606 <     * accurately set active status by caller. Also restarts if pool
607 <     * events occurred since last scan, which forces refresh of
608 <     * workers array, in case barrier was associated with resize.
605 >     * resorts to a circular sweep, which is necessary to accurately
606 >     * set active status. (The circular sweep uses steps of
607 >     * approximately half the array size plus 1, to avoid bias
608 >     * stemming from leftmost packing of the array in ForkJoinPool.)
609       *
610       * This method must be both fast and quiet -- usually avoiding
611       * memory accesses that could disrupt cache sharing etc other than
612 <     * those needed to check for and take tasks. This accounts for,
613 <     * among other things, updating random seed in place without
614 <     * storing it until exit.
612 >     * those needed to check for and take tasks (or to activate if not
613 >     * already active). This accounts for, among other things,
614 >     * updating random seed in place without storing it until exit.
615       *
616       * @return a task, or null if none found
617       */
618      private ForkJoinTask<?> scan() {
619 <        ForkJoinTask<?> t = null;
620 <        int r = seed;                    // extract once to keep scan quiet
621 <        ForkJoinWorkerThread[] ws;       // refreshed on outer loop
622 <        int mask;                        // must be power 2 minus 1 and > 0
623 <        outer:do {
624 <            if ((ws = pool.workers) != null && (mask = ws.length - 1) > 0) {
625 <                int idx = r;
626 <                int probes = ~mask;      // use random index while negative
627 <                for (;;) {
628 <                    r = xorShift(r);     // update random seed
629 <                    ForkJoinWorkerThread v = ws[mask & idx];
630 <                    if (v == null || v.sp == v.base) {
631 <                        if (probes <= mask)
632 <                            idx = (probes++ < 0) ? r : (idx + 1);
633 <                        else
634 <                            break;
619 >        ForkJoinPool p = pool;
620 >        ForkJoinWorkerThread[] ws;        // worker array
621 >        int n;                            // upper bound of #workers
622 >        if ((ws = p.workers) != null && (n = ws.length) > 1) {
623 >            boolean canSteal = active;    // shadow active status
624 >            int r = seed;                 // extract seed once
625 >            int mask = n - 1;
626 >            int j = -n;                   // loop counter
627 >            int k = r;                    // worker index, random if j < 0
628 >            for (;;) {
629 >                ForkJoinWorkerThread v = ws[k & mask];
630 >                r ^= r << 13; r ^= r >>> 17; r ^= r << 5; // inline xorshift
631 >                if (v != null && v.base != v.sp) {
632 >                    int b, i;             // inline specialized deqTask
633 >                    ForkJoinTask<?>[] q;
634 >                    ForkJoinTask<?> t;
635 >                    if ((canSteal ||      // ensure active status
636 >                         (canSteal = active = p.tryIncrementActiveCount())) &&
637 >                        (q = v.queue) != null &&
638 >                        (t = q[i = (q.length - 1) & (b = v.base)]) != null &&
639 >                        UNSAFE.compareAndSwapObject
640 >                        (q, (i << qShift) + qBase, t, null)) {
641 >                        v.base = b + 1;
642 >                        seed = r;
643 >                        ++stealCount;
644 >                        return t;
645                      }
646 <                    else if (!tryActivate() || (t = v.deqTask()) == null)
647 <                        continue outer;  // restart on contention
588 <                    else
589 <                        break outer;
646 >                    j = -n;
647 >                    k = r;                // restart on contention
648                  }
649 +                else if (++j <= 0)
650 +                    k = r;
651 +                else if (j <= n)
652 +                    k += (n >>> 1) | 1;
653 +                else
654 +                    break;
655              }
656 <        } while (pool.hasNewSyncEvent(this)); // retry on pool events
657 <        seed = r;
594 <        return t;
656 >        }
657 >        return null;
658      }
659  
660 +    // Run State management
661 +
662 +    // status check methods used mainly by ForkJoinPool
663 +    final boolean isTerminating() { return (runState & TERMINATING) != 0; }
664 +    final boolean isTerminated()  { return (runState & TERMINATED) != 0; }
665 +    final boolean isSuspended()   { return (runState & SUSPENDED) != 0; }
666 +    final boolean isTrimmed()     { return (runState & TRIMMED) != 0; }
667 +
668      /**
669 <     * Gets and removes a local or stolen task.
599 <     *
600 <     * @return a task, if available
669 >     * Sets state to TERMINATING, also resuming if suspended.
670       */
671 <    final ForkJoinTask<?> pollTask() {
672 <        ForkJoinTask<?> t = locallyFifo ? deqTask() : popTask();
673 <        if (t == null && (t = scan()) != null)
674 <            ++stealCount;
675 <        return t;
671 >    final void shutdown() {
672 >        for (;;) {
673 >            int s = runState;
674 >            if ((s & SUSPENDED) != 0) { // kill and wakeup if suspended
675 >                if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
676 >                                             (s & ~SUSPENDED) |
677 >                                             (TRIMMED|TERMINATING))) {
678 >                    LockSupport.unpark(this);
679 >                    break;
680 >                }
681 >            }
682 >            else if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
683 >                                              s | TERMINATING))
684 >                break;
685 >        }
686 >    }
687 >
688 >    /**
689 >     * Sets state to TERMINATED. Called only by this thread.
690 >     */
691 >    private void setTerminated() {
692 >        int s;
693 >        do {} while (!UNSAFE.compareAndSwapInt(this, runStateOffset,
694 >                                               s = runState,
695 >                                               s | (TERMINATING|TERMINATED)));
696      }
697  
698      /**
699 <     * Gets a local task.
699 >     * Instrumented version of park. Also used by ForkJoinPool.awaitEvent
700 >     */
701 >    final void doPark() {
702 >        ++parkCount;
703 >        LockSupport.park(this);
704 >    }
705 >
706 >    /**
707 >     * If suspended, tries to set status to unsuspended.
708 >     * Caller must unpark to actually resume
709       *
710 <     * @return a task, if available
710 >     * @return true if successful
711       */
712 <    final ForkJoinTask<?> pollLocalTask() {
713 <        return locallyFifo ? deqTask() : popTask();
712 >    final boolean tryUnsuspend() {
713 >        int s;
714 >        return (((s = runState) & SUSPENDED) != 0 &&
715 >                UNSAFE.compareAndSwapInt(this, runStateOffset, s,
716 >                                         s & ~SUSPENDED));
717      }
718  
719      /**
720 <     * Returns a pool submission, if one exists, activating first.
720 >     * Sets suspended status and blocks as spare until resumed,
721 >     * shutdown, or timed out.
722       *
723 <     * @return a submission, if available
723 >     * @return false if trimmed
724       */
725 <    private ForkJoinTask<?> pollSubmission() {
725 >    final boolean suspendAsSpare() {
726 >        for (;;) {               // set suspended unless terminating
727 >            int s = runState;
728 >            if ((s & TERMINATING) != 0) { // must kill
729 >                if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
730 >                                             s | (TRIMMED | TERMINATING)))
731 >                    return false;
732 >            }
733 >            else if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
734 >                                              s | SUSPENDED))
735 >                break;
736 >        }
737 >        lastEventCount = 0;      // reset upon resume
738          ForkJoinPool p = pool;
739 <        while (p.hasQueuedSubmissions()) {
740 <            ForkJoinTask<?> t;
741 <            if (tryActivate() && (t = p.pollSubmission()) != null)
742 <                return t;
739 >        p.releaseWaiters();      // help others progress
740 >        p.accumulateStealCount(this);
741 >        interrupted();           // clear/ignore interrupts
742 >        if (poolIndex < p.getParallelism()) { // untimed wait
743 >            while ((runState & SUSPENDED) != 0)
744 >                doPark();
745 >            return true;
746          }
747 <        return null;
747 >        return timedSuspend();   // timed wait if apparently non-core
748 >    }
749 >
750 >    /**
751 >     * Blocks as spare until resumed or timed out
752 >     * @return false if trimmed
753 >     */
754 >    private boolean timedSuspend() {
755 >        long nanos = SPARE_KEEPALIVE_NANOS;
756 >        long startTime = System.nanoTime();
757 >        while ((runState & SUSPENDED) != 0) {
758 >            ++parkCount;
759 >            if ((nanos -= (System.nanoTime() - startTime)) > 0)
760 >                LockSupport.parkNanos(this, nanos);
761 >            else { // try to trim on timeout
762 >                int s = runState;
763 >                if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
764 >                                             (s & ~SUSPENDED) |
765 >                                             (TRIMMED|TERMINATING)))
766 >                    return false;
767 >            }
768 >        }
769 >        return true;
770      }
771  
772 <    // Methods accessed only by Pool
772 >    // Misc support methods for ForkJoinPool
773 >
774 >    /**
775 >     * Returns an estimate of the number of tasks in the queue.  Also
776 >     * used by ForkJoinTask.
777 >     */
778 >    final int getQueueSize() {
779 >        return -base + sp;
780 >    }
781 >
782 >    /**
783 >     * Set locallyFifo mode. Called only by ForkJoinPool
784 >     */
785 >    final void setAsyncMode(boolean async) {
786 >        locallyFifo = async;
787 >    }
788  
789      /**
790       * Removes and cancels all tasks in queue.  Can be called from any
791       * thread.
792       */
793      final void cancelTasks() {
794 <        ForkJoinTask<?> t;
795 <        while (base != sp && (t = deqTask()) != null)
796 <            t.cancelIgnoringExceptions();
794 >        while (base != sp) {
795 >            ForkJoinTask<?> t = deqTask();
796 >            if (t != null)
797 >                t.cancelIgnoringExceptions();
798 >        }
799      }
800  
801      /**
# Line 647 | Line 803 | public class ForkJoinWorkerThread extend
803       *
804       * @return the number of tasks drained
805       */
806 <    final int drainTasksTo(Collection<ForkJoinTask<?>> c) {
806 >    final int drainTasksTo(Collection<? super ForkJoinTask<?>> c) {
807          int n = 0;
808 <        ForkJoinTask<?> t;
809 <        while (base != sp && (t = deqTask()) != null) {
810 <            c.add(t);
811 <            ++n;
808 >        while (base != sp) {
809 >            ForkJoinTask<?> t = deqTask();
810 >            if (t != null) {
811 >                c.add(t);
812 >                ++n;
813 >            }
814          }
815          return n;
816      }
817  
818 +    // Support methods for ForkJoinTask
819 +
820      /**
821 <     * Gets and clears steal count for accumulation by pool.  Called
822 <     * only when known to be idle (in pool.sync and termination).
821 >     * Returns an estimate of the number of tasks, offset by a
822 >     * function of number of idle workers.
823 >     *
824 >     * This method provides a cheap heuristic guide for task
825 >     * partitioning when programmers, frameworks, tools, or languages
826 >     * have little or no idea about task granularity.  In essence by
827 >     * offering this method, we ask users only about tradeoffs in
828 >     * overhead vs expected throughput and its variance, rather than
829 >     * how finely to partition tasks.
830 >     *
831 >     * In a steady state strict (tree-structured) computation, each
832 >     * thread makes available for stealing enough tasks for other
833 >     * threads to remain active. Inductively, if all threads play by
834 >     * the same rules, each thread should make available only a
835 >     * constant number of tasks.
836 >     *
837 >     * The minimum useful constant is just 1. But using a value of 1
838 >     * would require immediate replenishment upon each steal to
839 >     * maintain enough tasks, which is infeasible.  Further,
840 >     * partitionings/granularities of offered tasks should minimize
841 >     * steal rates, which in general means that threads nearer the top
842 >     * of computation tree should generate more than those nearer the
843 >     * bottom. In perfect steady state, each thread is at
844 >     * approximately the same level of computation tree. However,
845 >     * producing extra tasks amortizes the uncertainty of progress and
846 >     * diffusion assumptions.
847 >     *
848 >     * So, users will want to use values larger, but not much larger
849 >     * than 1 to both smooth over transient shortages and hedge
850 >     * against uneven progress; as traded off against the cost of
851 >     * extra task overhead. We leave the user to pick a threshold
852 >     * value to compare with the results of this call to guide
853 >     * decisions, but recommend values such as 3.
854 >     *
855 >     * When all threads are active, it is on average OK to estimate
856 >     * surplus strictly locally. In steady-state, if one thread is
857 >     * maintaining say 2 surplus tasks, then so are others. So we can
858 >     * just use estimated queue length (although note that (sp - base)
859 >     * can be an overestimate because of stealers lagging increments
860 >     * of base).  However, this strategy alone leads to serious
861 >     * mis-estimates in some non-steady-state conditions (ramp-up,
862 >     * ramp-down, other stalls). We can detect many of these by
863 >     * further considering the number of "idle" threads, that are
864 >     * known to have zero queued tasks, so compensate by a factor of
865 >     * (#idle/#active) threads.
866       */
867 <    final int getAndClearStealCount() {
868 <        int sc = stealCount;
666 <        stealCount = 0;
667 <        return sc;
867 >    final int getEstimatedSurplusTaskCount() {
868 >        return sp - base - pool.idlePerActive();
869      }
870  
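
The long comment above is the rationale for the surplus heuristic that users reach through ForkJoinTask.getSurplusQueuedTaskCount() (present in jsr166y of this vintage; treat the exact entry point as an assumption). A sketch of the recommended pattern, comparing the estimate against the suggested threshold of 3; the task itself is invented for illustration:

    import jsr166y.ForkJoinTask;
    import jsr166y.RecursiveAction;

    // Illustrative divide-and-conquer task: stops forking once the local
    // queue already holds enough surplus work for other threads to steal.
    class SurplusAwareSum extends RecursiveAction {
        final long[] a; final int lo, hi; long result;
        SurplusAwareSum(long[] a, int lo, int hi) {
            this.a = a; this.lo = lo; this.hi = hi;
        }
        protected void compute() {
            if (hi - lo <= 1024 ||
                ForkJoinTask.getSurplusQueuedTaskCount() >= 3) {
                long s = 0;                    // small enough, or surplus is
                for (int i = lo; i < hi; ++i)  // ample: just run sequentially
                    s += a[i];
                result = s;
            } else {
                int mid = (lo + hi) >>> 1;
                SurplusAwareSum left  = new SurplusAwareSum(a, lo, mid);
                SurplusAwareSum right = new SurplusAwareSum(a, mid, hi);
                left.fork();                   // make half stealable
                right.compute();
                left.join();
                result = left.result + right.result;
            }
        }
    }
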
871      /**
872 <     * Returns {@code true} if at least one worker in the given array
672 <     * appears to have at least one queued task.
872 >     * Gets and removes a local task.
873       *
874 <     * @param ws array of workers
874 >     * @return a task, if available
875       */
876 <    static boolean hasQueuedTasks(ForkJoinWorkerThread[] ws) {
877 <        if (ws != null) {
878 <            int len = ws.length;
879 <            for (int j = 0; j < 2; ++j) { // need two passes for clean sweep
680 <                for (int i = 0; i < len; ++i) {
681 <                    ForkJoinWorkerThread w = ws[i];
682 <                    if (w != null && w.sp != w.base)
683 <                        return true;
684 <                }
685 <            }
876 >    final ForkJoinTask<?> pollLocalTask() {
877 >        while (base != sp) {
878 >            if (active || (active = pool.tryIncrementActiveCount()))
879 >                return locallyFifo? locallyDeqTask() : popTask();
880          }
881 <        return false;
881 >        return null;
882      }
883  
884 <    // Support methods for ForkJoinTask
884 >    /**
885 >     * Gets and removes a local or stolen task.
886 >     *
887 >     * @return a task, if available
888 >     */
889 >    final ForkJoinTask<?> pollTask() {
890 >        ForkJoinTask<?> t;
891 >        return (t = pollLocalTask()) != null ? t : scan();
892 >    }
893  
894      /**
895 <     * Returns an estimate of the number of tasks in the queue.
895 >     * Executes or processes other tasks while awaiting the given task.
896 >     * @return task completion status
897       */
898 <    final int getQueueSize() {
899 <        // suppress momentarily negative values
900 <        return Math.max(0, sp - base);
898 >    final int execWhileJoining(ForkJoinTask<?> joinMe) {
899 >        int s;
900 >        while ((s = joinMe.status) >= 0) {
901 >            ForkJoinTask<?> t = base != sp?
902 >                popWhileJoining(joinMe) :
903 >                scanWhileJoining(joinMe);
904 >            if (t != null)
905 >                t.tryExec();
906 >        }
907 >        return s;
908      }
909  
910      /**
911 <     * Returns an estimate of the number of tasks, offset by a
912 <     * function of number of idle workers.
911 >     * Returns a stolen task, if available, unless joinMe is done.
912 >     *
913 >     * This method is intrinsically nonmodular. To maintain the
914 >     * property that tasks are never stolen if the awaited task is
915 >     * ready, we must interleave mechanics of scan with status
916 >     * checks. We rely here on the commit points of deq that allow us
917 >     * to cancel a steal even after CASing slot to null, but before
918 >     * adjusting base index: If, after the CAS, we see that joinMe is
919 >     * ready, we can back out by placing the task back into the slot,
920 >     * without adjusting index. The loop is otherwise a variant of the
921 >     * one in scan().
922 >     *
923       */
924 <    final int getEstimatedSurplusTaskCount() {
925 <        // The halving approximates weighting idle vs non-idle workers
926 <        return (sp - base) - (pool.getIdleThreadCount() >>> 1);
924 >    private ForkJoinTask<?> scanWhileJoining(ForkJoinTask<?> joinMe) {
925 >        int r = seed;
926 >        ForkJoinPool p = pool;
927 >        ForkJoinWorkerThread[] ws;
928 >        int n;
929 >        outer:while ((ws = p.workers) != null && (n = ws.length) > 1) {
930 >            int mask = n - 1;
931 >            int k = r;
932 >            boolean contended = false; // to retry loop if deq contends
933 >            for (int j = -n; j <= n; ++j) {
934 >                if (joinMe.status < 0)
935 >                    break outer;
936 >                int b;
937 >                ForkJoinTask<?>[] q;
938 >                ForkJoinWorkerThread v = ws[k & mask];
939 >                r ^= r << 13; r ^= r >>> 17; r ^= r << 5; // xorshift
940 >                if (v != null && (b=v.base) != v.sp && (q=v.queue) != null) {
941 >                    int i = (q.length - 1) & b;
942 >                    ForkJoinTask<?> t = q[i];
943 >                    if (t != null && UNSAFE.compareAndSwapObject
944 >                        (q, (i << qShift) + qBase, t, null)) {
945 >                        if (joinMe.status >= 0) {
946 >                            v.base = b + 1;
947 >                            seed = r;
948 >                            ++stealCount;
949 >                            return t;
950 >                        }
951 >                        UNSAFE.putObjectVolatile(q, (i<<qShift)+qBase, t);
952 >                        break outer; // back out
953 >                    }
954 >                    contended = true;
955 >                }
956 >                k = j < 0 ? r : (k + ((n >>> 1) | 1));
957 >            }
958 >            if (!contended && p.tryAwaitBusyJoin(joinMe))
959 >                break;
960 >        }
961 >        return null;
962      }
963  
964      /**
965 <     * Scans, returning early if joinMe done.
965 >     * Version of popTask with join checks surrounding extraction.
966 >     * Uses the same backout strategy as scanWhileJoining. Note that
967 >     * we ignore locallyFifo flag for local tasks here since helping
968 >     * joins only make sense in LIFO mode.
969 >     *
970 >     * @return a popped task, if available, unless joinMe is done
971       */
972 <    final ForkJoinTask<?> scanWhileJoining(ForkJoinTask<?> joinMe) {
973 <        ForkJoinTask<?> t = pollTask();
974 <        if (t != null && joinMe.status < 0 && sp == base) {
975 <            pushTask(t); // unsteal if done and this task would be stealable
976 <            t = null;
972 >    private ForkJoinTask<?> popWhileJoining(ForkJoinTask<?> joinMe) {
973 >        int s;
974 >        ForkJoinTask<?>[] q;
975 >        while ((s = sp) != base && (q = queue) != null && joinMe.status >= 0) {
976 >            int i = (q.length - 1) & --s;
977 >            ForkJoinTask<?> t = q[i];
978 >            if (t != null && UNSAFE.compareAndSwapObject
979 >                (q, (i << qShift) + qBase, t, null)) {
980 >                if (joinMe.status >= 0) {
981 >                    sp = s;
982 >                    return t;
983 >                }
984 >                UNSAFE.putObjectVolatile(q, (i << qShift) + qBase, t);
985 >                break;  // back out
986 >            }
987          }
988 <        return t;
988 >        return null;
989      }
990  
991      /**
# Line 723 | Line 993 | public class ForkJoinWorkerThread extend
993       */
994      final void helpQuiescePool() {
995          for (;;) {
996 <            ForkJoinTask<?> t = pollTask();
997 <            if (t != null)
998 <                t.quietlyExec();
999 <            else if (tryInactivate() && pool.isQuiescent())
1000 <                break;
996 >            ForkJoinTask<?> t = pollLocalTask();
997 >            if (t != null || (t = scan()) != null)
998 >                t.tryExec();
999 >            else {
1000 >                ForkJoinPool p = pool;
1001 >                if (active) {
1002 >                    active = false; // inactivate
1003 >                    do {} while (!p.tryDecrementActiveCount());
1004 >                }
1005 >                if (p.isQuiescent()) {
1006 >                    active = true; // re-activate
1007 >                    do {} while (!p.tryIncrementActiveCount());
1008 >                    return;
1009 >                }
1010 >            }
1011          }
732        do {} while (!tryActivate()); // re-activate on exit
1012      }
1013  
1014      // Unsafe mechanics
1015  
1016      private static final sun.misc.Unsafe UNSAFE = getUnsafe();
738    private static final long spOffset =
739        objectFieldOffset("sp", ForkJoinWorkerThread.class);
1017      private static final long runStateOffset =
1018          objectFieldOffset("runState", ForkJoinWorkerThread.class);
1019 <    private static final long qBase;
1019 >    private static final long qBase =
1020 >        UNSAFE.arrayBaseOffset(ForkJoinTask[].class);
1021      private static final int qShift;
1022  
1023      static {
746        qBase = UNSAFE.arrayBaseOffset(ForkJoinTask[].class);
1024          int s = UNSAFE.arrayIndexScale(ForkJoinTask[].class);
1025          if ((s & (s-1)) != 0)
1026              throw new Error("data type scale not a power of two");
