
Comparing jsr166/src/jsr166y/ForkJoinWorkerThread.java (file contents):
Revision 1.28 by dl, Mon Aug 3 13:40:07 2009 UTC vs.
Revision 1.33 by dl, Thu May 27 16:46:49 2010 UTC

# Line 8 | Line 8 | package jsr166y;
8  
9   import java.util.concurrent.*;
10  
11 + import java.util.Random;
12   import java.util.Collection;
13 + import java.util.concurrent.locks.LockSupport;
14  
15   /**
16   * A thread managed by a {@link ForkJoinPool}.  This class is
# Line 25 | Line 27 | import java.util.Collection;
27   */
28   public class ForkJoinWorkerThread extends Thread {
29      /*
30 <     * Algorithm overview:
30 >     * Overview:
31       *
32 <     * 1. Work-Stealing: Work-stealing queues are special forms of
33 <     * Deques that support only three of the four possible
34 <     * end-operations -- push, pop, and deq (aka steal), and only do
35 <     * so under the constraints that push and pop are called only from
36 <     * the owning thread, while deq may be called from other threads.
37 <     * (If you are unfamiliar with them, you probably want to read
38 <     * Herlihy and Shavit's book "The Art of Multiprocessor
39 <     * programming", chapter 16 describing these in more detail before
40 <     * proceeding.)  The main work-stealing queue design is roughly
41 <     * similar to "Dynamic Circular Work-Stealing Deque" by David
42 <     * Chase and Yossi Lev, SPAA 2005
43 <     * (http://research.sun.com/scalable/pubs/index.html).  The main
44 <     * difference ultimately stems from gc requirements that we null
45 <     * out taken slots as soon as we can, to maintain as small a
46 <     * footprint as possible even in programs generating huge numbers
47 <     * of tasks. To accomplish this, we shift the CAS arbitrating pop
48 <     * vs deq (steal) from being on the indices ("base" and "sp") to
49 <     * the slots themselves (mainly via method "casSlotNull()"). So,
50 <     * both a successful pop and deq mainly entail CAS'ing a non-null
51 <     * slot to null.  Because we rely on CASes of references, we do
52 <     * not need tag bits on base or sp.  They are simple ints as used
53 <     * in any circular array-based queue (see for example ArrayDeque).
54 <     * Updates to the indices must still be ordered in a way that
55 <     * guarantees that (sp - base) > 0 means the queue is empty, but
56 <     * otherwise may err on the side of possibly making the queue
57 <     * appear nonempty when a push, pop, or deq have not fully
58 <     * committed. Note that this means that the deq operation,
59 <     * considered individually, is not wait-free. One thief cannot
60 <     * successfully continue until another in-progress one (or, if
61 <     * previously empty, a push) completes.  However, in the
62 <     * aggregate, we ensure at least probabilistic
63 <     * non-blockingness. If an attempted steal fails, a thief always
64 <     * chooses a different random victim target to try next. So, in
65 <     * order for one thief to progress, it suffices for any
66 <     * in-progress deq or new push on any empty queue to complete. One
67 <     * reason this works well here is that apparently-nonempty often
68 <     * means soon-to-be-stealable, which gives threads a chance to
69 <     * activate if necessary before stealing (see below).
32 >     * ForkJoinWorkerThreads are managed by ForkJoinPools and perform
33 >     * ForkJoinTasks. This class includes bookkeeping in support of
34 >     * worker activation, suspension, and lifecycle control described
35 >     * in more detail in the internal documentation of class
36 >     * ForkJoinPool. And as described further below, this class also
37 >     * includes special-cased support for some ForkJoinTask
38 >     * methods. But the main mechanics involve work-stealing:
39 >     *
40 >     * Work-stealing queues are special forms of Deques that support
41 >     * only three of the four possible end-operations -- push, pop,
42 >     * and deq (aka steal), under the further constraints that push
43 >     * and pop are called only from the owning thread, while deq may
44 >     * be called from other threads.  (If you are unfamiliar with
45 >     * them, you probably want to read Herlihy and Shavit's book "The
46 >     * Art of Multiprocessor programming", chapter 16 describing these
47 >     * in more detail before proceeding.)  The main work-stealing
48 >     * queue design is roughly similar to those in the papers "Dynamic
49 >     * Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005
50 >     * (http://research.sun.com/scalable/pubs/index.html) and
51 >     * "Idempotent work stealing" by Michael, Saraswat, and Vechev,
52 >     * PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186).
53 >     * The main differences ultimately stem from gc requirements that
54 >     * we null out taken slots as soon as we can, to maintain as small
55 >     * a footprint as possible even in programs generating huge
56 >     * numbers of tasks. To accomplish this, we shift the CAS
57 >     * arbitrating pop vs deq (steal) from being on the indices
58 >     * ("base" and "sp") to the slots themselves (mainly via method
59 >     * "casSlotNull()"). So, both a successful pop and deq mainly
60 >     * entail a CAS of a slot from non-null to null.  Because we rely
61 >     * on CASes of references, we do not need tag bits on base or sp.
62 >     * They are simple ints as used in any circular array-based queue
63 >     * (see for example ArrayDeque).  Updates to the indices must
64 >     * still be ordered in a way that guarantees that sp == base means
65 >     * the queue is empty, but otherwise may err on the side of
66 >     * possibly making the queue appear nonempty when a push, pop, or
67 >     * deq have not fully committed. Note that this means that the deq
68 >     * operation, considered individually, is not wait-free. One thief
69 >     * cannot successfully continue until another in-progress one (or,
70 >     * if previously empty, a push) completes.  However, in the
71 >     * aggregate, we ensure at least probabilistic non-blockingness.
72 >     * If an attempted steal fails, a thief always chooses a different
73 >     * random victim target to try next. So, in order for one thief to
74 >     * progress, it suffices for any in-progress deq or new push on
75 >     * any empty queue to complete. One reason this works well here is
76 >     * that apparently-nonempty often means soon-to-be-stealable,
77 >     * which gives threads a chance to set activation status if
78 >     * necessary before stealing.
79       *
80       * This approach also enables support for "async mode" where local
81       * task processing is in FIFO, not LIFO order; simply by using a
# Line 75 | Line 86 | public class ForkJoinWorkerThread extend
86       * Efficient implementation of this approach currently relies on
87       * an uncomfortable amount of "Unsafe" mechanics. To maintain
88       * correct orderings, reads and writes of variable base require
89 <     * volatile ordering.  Variable sp does not require volatile write
90 <     * but needs cheaper store-ordering on writes.  Because they are
91 <     * protected by volatile base reads, reads of the queue array and
92 <     * its slots do not need volatile load semantics, but writes (in
93 <     * push) require store order and CASes (in pop and deq) require
94 <     * (volatile) CAS semantics.  (See "Idempotent work stealing" by
95 <     * Michael, Saraswat, and Vechev, PPoPP 2009
96 <     * http://portal.acm.org/citation.cfm?id=1504186 for an algorithm
97 <     * with similar properties, but without support for nulling
98 <     * slots.)  Since these combinations aren't supported using
99 <     * ordinary volatiles, the only way to accomplish these
89 >     * volatile ordering.  Variable sp does not require volatile
90 >     * writes but still needs store-ordering, which we accomplish by
91 >     * pre-incrementing sp before filling the slot with an ordered
92 >     * store.  (Pre-incrementing also enables backouts used in
93 >     * scanWhileJoining.)  Because they are protected by volatile base
94 >     * reads, reads of the queue array and its slots by other threads
95 >     * do not need volatile load semantics, but writes (in push)
96 >     * require store order and CASes (in pop and deq) require
97 >     * (volatile) CAS semantics.  (Michael, Saraswat, and Vechev's
98 >     * algorithm has similar properties, but without support for
99 >     * nulling slots.)  Since these combinations aren't supported
100 >     * using ordinary volatiles, the only way to accomplish these
101       * efficiently is to use direct Unsafe calls. (Using external
102       * AtomicIntegers and AtomicReferenceArrays for the indices and
103       * array is significantly slower because of memory locality and
104       * indirection effects.)
105 <     *
105 >     *
106       * Further, performance on most platforms is very sensitive to
107       * placement and sizing of the (resizable) queue array.  Even
108       * though these queues don't usually become all that big, the
109       * initial size must be large enough to counteract cache
110       * contention effects across multiple queues (especially in the
111       * presence of GC cardmarking). Also, to improve thread-locality,
112 <     * queues are currently initialized immediately after the thread
113 <     * gets the initial signal to start processing tasks.  However,
114 <     * all queue-related methods except pushTask are written in a way
115 <     * that allows them to instead be lazily allocated and/or disposed
116 <     * of when empty. All together, these low-level implementation
117 <     * choices produce as much as a factor of 4 performance
118 <     * improvement compared to naive implementations, and enable the
119 <     * processing of billions of tasks per second, sometimes at the
120 <     * expense of ugliness.
121 <     *
122 <     * 2. Run control: The primary run control is based on a global
123 <     * counter (activeCount) held by the pool. It uses an algorithm
124 <     * similar to that in Herlihy and Shavit section 17.6 to cause
113 <     * threads to eventually block when all threads declare they are
114 <     * inactive. For this to work, threads must be declared active
115 <     * when executing tasks, and before stealing a task. They must be
116 <     * inactive before blocking on the Pool Barrier (awaiting a new
117 <     * submission or other Pool event). In between, there is some free
118 <     * play which we take advantage of to avoid contention and rapid
119 <     * flickering of the global activeCount: If inactive, we activate
120 <     * only if a victim queue appears to be nonempty (see above).
121 <     * Similarly, a thread tries to inactivate only after a full scan
122 <     * of other threads.  The net effect is that contention on
123 <     * activeCount is rarely a measurable performance issue. (There
124 <     * are also a few other cases where we scan for work rather than
125 <     * retry/block upon contention.)
126 <     *
127 <     * 3. Selection control. We maintain policy of always choosing to
128 <     * run local tasks rather than stealing, and always trying to
129 <     * steal tasks before trying to run a new submission. All steals
130 <     * are currently performed in randomly-chosen deq-order. It may be
131 <     * worthwhile to bias these with locality / anti-locality
132 <     * information, but doing this well probably requires more
133 <     * lower-level information from JVMs than currently provided.
112 >     * queues are initialized after starting.  All together, these
113 >     * low-level implementation choices produce as much as a factor of
114 >     * 4 performance improvement compared to naive implementations,
115 >     * and enable the processing of billions of tasks per second,
116 >     * sometimes at the expense of ugliness.
117 >     */
118 >
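To make the slot-CAS protocol described in the overview concrete, here is a minimal, hedged sketch using AtomicReferenceArray in place of the Unsafe mechanics (which, as noted above, is the slower but simpler alternative); resizing, signalling, and the ordered-store handling of sp are omitted, and the class name is invented for illustration:

    import java.util.concurrent.atomic.AtomicReferenceArray;

    // Illustrative sketch only, not the pool's implementation.
    class SlotCasDeque {
        final AtomicReferenceArray<Runnable> slots =
            new AtomicReferenceArray<>(1 << 13);      // power of two
        volatile int base;                            // next slot to steal from
        volatile int sp;                              // next slot to push to

        void push(Runnable t) {                       // owner thread only
            slots.set(sp & (slots.length() - 1), t);  // fill slot first
            sp = sp + 1;                              // then publish
        }

        Runnable pop() {                              // owner thread only
            int s = sp;
            if (s != base) {
                int i = (s - 1) & (slots.length() - 1);
                Runnable t = slots.get(i);
                if (t != null && slots.compareAndSet(i, t, null)) {
                    sp = s - 1;                       // commit the pop
                    return t;
                }
            }
            return null;                              // empty or lost to a thief
        }

        Runnable steal() {                            // any other thread
            int b = base;
            if (b != sp) {
                int i = b & (slots.length() - 1);
                Runnable t = slots.get(i);
                if (t != null && slots.compareAndSet(i, t, null)) {
                    base = b + 1;                     // commit the steal
                    return t;
                }
            }
            return null;                              // empty or contended
        }
    }

Both pop and steal commit by CASing the same slot from non-null to null, so a single CAS arbitrates the race the overview describes; a failed CAS simply reports contention rather than retrying.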
119 >    /**
120 >     * Generator for initial random seeds for random victim
121 >     * selection. This is used only to create initial seeds. Random
122 >     * steals use a cheaper xorshift generator per steal attempt. We
123 >     * expect only rare contention on seedGenerator, so just use a
124 >     * plain Random.
125       */
126 +    private static final Random seedGenerator = new Random();
127 +
128 +    /**
129 +     * The timeout value for suspending spares. Spare workers that
130 +     * remain unsignalled for more than this time may be trimmed
131 +     * (killed and removed from pool).  Since our goal is to avoid
132 +     * long-term thread buildup, the exact value of timeout does not
133 +     * matter too much so long as it avoids most false-alarm timeouts
134 +     * under GC stalls or momentarily high system load.
135 +     */
136 +    private static final long SPARE_KEEPALIVE_NANOS =
137 +        5L * 1000L * 1000L * 1000L; // 5 secs
138  
139      /**
140       * Capacity of work-stealing queue array upon initialization.
# Line 155 | Line 158 | public class ForkJoinWorkerThread extend
158  
159      /**
160       * The work-stealing queue array. Size must be a power of two.
161 <     * Initialized when thread starts, to improve memory locality.
161 >     * Initialized in onStart, to improve memory locality.
162       */
163      private ForkJoinTask<?>[] queue;
164  
165      /**
163     * Index (mod queue.length) of next queue slot to push to or pop
164     * from. It is written only by owner thread, via ordered store.
165     * Both sp and base are allowed to wrap around on overflow, but
166     * (sp - base) still estimates size.
167     */
168    private volatile int sp;
169
170    /**
166       * Index (mod queue.length) of least valid queue slot, which is
167       * always the next position to steal from if nonempty.
168       */
169      private volatile int base;
170  
171      /**
172 <     * Activity status. When true, this worker is considered active.
173 <     * Must be false upon construction. It must be true when executing
174 <     * tasks, and BEFORE stealing a task. It must be false before
175 <     * calling pool.sync.
176 <     */
177 <    private boolean active;
172 >     * Index (mod queue.length) of next queue slot to push to or pop
173 >     * from. It is written only by owner thread, and accessed by other
174 >     * threads only after reading (volatile) base.  Both sp and base
175 >     * are allowed to wrap around on overflow, but (sp - base) still
176 >     * estimates size.
177 >     */
178 >    private int sp;
179  
180      /**
181 <     * Run state of this worker. Supports simple versions of the usual
182 <     * shutdown/shutdownNow control.
181 >     * Run state of this worker. In addition to the usual run levels,
182 >     * tracks if this worker is suspended as a spare, and if it was
183 >     * killed (trimmed) while suspended. However, "active" status is
184 >     * maintained separately.
185       */
186      private volatile int runState;
187  
188 +    private static final int TERMINATING = 0x01;
189 +    private static final int TERMINATED  = 0x02;
190 +    private static final int SUSPENDED   = 0x04; // inactive spare
191 +    private static final int TRIMMED     = 0x08; // killed while suspended
192 +
193 +    /**
194 +     * Number of LockSupport.park calls to block this thread for
195 +     * suspension or event waits. Used for internal instrumentation;
196 +     * currently not exported but included because volatile write upon
197 +     * park also provides a workaround for a JVM bug.
198 +     */
199 +    private volatile int parkCount;
200 +
201 +    /**
202 +     * Number of steals, transferred to and reset by pool callbacks
203 +     * when idle. Accessed directly by pool.
204 +     */
205 +    int stealCount;
206 +
207      /**
208       * Seed for random number generator for choosing steal victims.
209 <     * Uses Marsaglia xorshift. Must be nonzero upon initialization.
209 >     * Uses Marsaglia xorshift. Must be initialized as nonzero.
210       */
211      private int seed;
212  
213      /**
214 <     * Number of steals, transferred to pool when idle
214 >     * Activity status. When true, this worker is considered active.
215 >     * Accessed directly by pool.  Must be false upon construction.
216       */
217 <    private int stealCount;
217 >    boolean active;
218 >
219 >    /**
220 >     * True if using local FIFO, rather than the default LIFO, for
221 >     * local polling. Shadows the value from ForkJoinPool, which
222 >     * resets it if changed pool-wide.
223 >     */
224 >    private boolean locallyFifo;
225  
226      /**
227       * Index of this worker in pool array. Set once by pool before
228 <     * running, and accessed directly by pool during cleanup etc.
228 >     * running, and accessed directly by pool to locate this worker in
229 >     * its workers array.
230       */
231      int poolIndex;
232  
233      /**
234 <     * The last barrier event waited for. Accessed in pool callback
235 <     * methods, but only by current thread.
234 >     * The last pool event waited for. Accessed only by pool in
235 >     * callback methods invoked within this thread.
236       */
237 <    long lastEventCount;
237 >    int lastEventCount;
238  
239      /**
240 <     * True if use local fifo, not default lifo, for local polling
240 >     * Encoded index and event count of next event waiter. Used only
241 >     * by ForkJoinPool for managing event waiters.
242       */
243 <    private boolean locallyFifo;
243 >    volatile long nextWaiter;
244  
245      /**
246       * Creates a ForkJoinWorkerThread operating in the given pool.
# Line 224 | Line 251 | public class ForkJoinWorkerThread extend
251      protected ForkJoinWorkerThread(ForkJoinPool pool) {
252          if (pool == null) throw new NullPointerException();
253          this.pool = pool;
254 <        // Note: poolIndex is set by pool during construction
255 <        // Remaining initialization is deferred to onStart
254 >        // To avoid exposing construction details to subclasses,
255 >        // remaining initialization is in start() and onStart()
256 >    }
257 >
258 >    /**
259 >     * Performs additional initialization and starts this thread
260 >     */
261 >    final void start(int poolIndex, boolean locallyFifo,
262 >                     UncaughtExceptionHandler ueh) {
263 >        this.poolIndex = poolIndex;
264 >        this.locallyFifo = locallyFifo;
265 >        if (ueh != null)
266 >            setUncaughtExceptionHandler(ueh);
267 >        setDaemon(true);
268 >        start();
269      }
270  
271 <    // Public access methods
271 >    // Public/protected methods
272  
273      /**
274       * Returns the pool hosting this thread.
# Line 253 | Line 293 | public class ForkJoinWorkerThread extend
293      }
294  
295      /**
296 <     * Establishes local first-in-first-out scheduling mode for forked
297 <     * tasks that are never joined.
298 <     *
299 <     * @param async if true, use locally FIFO scheduling
296 >     * Initializes internal state after construction but before
297 >     * processing any tasks. If you override this method, you must
298 >     * invoke super.onStart() at the beginning of the method.
299 >     * Initialization requires care: Most fields must have legal
300 >     * default values, to ensure that attempted accesses from other
301 >     * threads work correctly even before this thread starts
302 >     * processing tasks.
303       */
304 <    void setAsyncMode(boolean async) {
305 <        locallyFifo = async;
306 <    }
264 <
265 <    // Runstate management
266 <
267 <    // Runstate values. Order matters
268 <    private static final int RUNNING     = 0;
269 <    private static final int SHUTDOWN    = 1;
270 <    private static final int TERMINATING = 2;
271 <    private static final int TERMINATED  = 3;
272 <
273 <    final boolean isShutdown()    { return runState >= SHUTDOWN;  }
274 <    final boolean isTerminating() { return runState >= TERMINATING;  }
275 <    final boolean isTerminated()  { return runState == TERMINATED; }
276 <    final boolean shutdown()      { return transitionRunStateTo(SHUTDOWN); }
277 <    final boolean shutdownNow()   { return transitionRunStateTo(TERMINATING); }
304 >    protected void onStart() {
305 >        int rs = seedGenerator.nextInt();
306 >        seed = rs == 0? 1 : rs; // seed must be nonzero
307  
308 <    /**
309 <     * Transitions to at least the given state.
310 <     *
311 <     * @return {@code true} if not already at least at given state
283 <     */
284 <    private boolean transitionRunStateTo(int state) {
285 <        for (;;) {
286 <            int s = runState;
287 <            if (s >= state)
288 <                return false;
289 <            if (UNSAFE.compareAndSwapInt(this, runStateOffset, s, state))
290 <                return true;
291 <        }
292 <    }
308 >        // Allocate name string and queue array in this thread
309 >        String pid = Integer.toString(pool.getPoolNumber());
310 >        String wid = Integer.toString(poolIndex);
311 >        setName("ForkJoinPool-" + pid + "-worker-" + wid);
312  
313 <    /**
295 <     * Tries to set status to active; fails on contention.
296 <     */
297 <    private boolean tryActivate() {
298 <        if (!active) {
299 <            if (!pool.tryIncrementActiveCount())
300 <                return false;
301 <            active = true;
302 <        }
303 <        return true;
313 >        queue = new ForkJoinTask<?>[INITIAL_QUEUE_CAPACITY];
314      }
315  
316      /**
317 <     * Tries to set status to inactive; fails on contention.
317 >     * Performs cleanup associated with termination of this worker
318 >     * thread.  If you override this method, you must invoke
319 >     * {@code super.onTermination} at the end of the overridden method.
320 >     *
321 >     * @param exception the exception causing this thread to abort due
322 >     * to an unrecoverable error, or {@code null} if completed normally
323       */
324 <    private boolean tryInactivate() {
325 <        if (active) {
326 <            if (!pool.tryDecrementActiveCount())
327 <                return false;
328 <            active = false;
324 >    protected void onTermination(Throwable exception) {
325 >        try {
326 >            cancelTasks();
327 >            setTerminated();
328 >            pool.workerTerminated(this);
329 >        } catch (Throwable ex) {        // Shouldn't ever happen
330 >            if (exception == null)      // but if so, at least rethrow it
331 >                exception = ex;
332 >        } finally {
333 >            if (exception != null)
334 >                UNSAFE.throwException(exception);
335          }
315        return true;
316    }
317
318    /**
319     * Computes next value for random victim probe.  Scans don't
320     * require a very high quality generator, but also not a crummy
321     * one.  Marsaglia xor-shift is cheap and works well.
322     */
323    private static int xorShift(int r) {
324        r ^= (r << 13);
325        r ^= (r >>> 17);
326        return r ^ (r << 5);
336      }
337  
329    // Lifecycle methods
330
338      /**
339       * This method is required to be public, but should never be
340       * called explicitly. It performs the main run loop to execute
# Line 337 | Line 344 | public class ForkJoinWorkerThread extend
344          Throwable exception = null;
345          try {
346              onStart();
340            pool.sync(this); // await first pool event
347              mainLoop();
348          } catch (Throwable ex) {
349              exception = ex;
# Line 346 | Line 352 | public class ForkJoinWorkerThread extend
352          }
353      }
354  
355 +    // helpers for run()
356 +
357      /**
358 <     * Executes tasks until shut down.
358 >     * Finds and executes tasks and checks status while running.
359       */
360      private void mainLoop() {
361 <        while (!isShutdown()) {
362 <            ForkJoinTask<?> t = pollTask();
363 <            if (t != null || (t = pollSubmission()) != null)
364 <                t.quietlyExec();
365 <            else if (tryInactivate())
366 <                pool.sync(this);
361 >        boolean ran = false; // true if ran task on previous step
362 >        ForkJoinPool p = pool;
363 >        for (;;) {
364 >            p.preStep(this, ran);
365 >            if (runState != 0)
366 >                return;
367 >            ForkJoinTask<?> t; // try to get and run stolen or submitted task
368 >            if (ran = (t = scan()) != null || (t = pollSubmission()) != null) {
369 >                t.tryExec();
370 >                if (base != sp)
371 >                    runLocalTasks();
372 >            }
373          }
374      }
375  
376      /**
377 <     * Initializes internal state after construction but before
378 <     * processing any tasks. If you override this method, you must
365 <     * invoke super.onStart() at the beginning of the method.
366 <     * Initialization requires care: Most fields must have legal
367 <     * default values, to ensure that attempted accesses from other
368 <     * threads work correctly even before this thread starts
369 <     * processing tasks.
377 >     * Runs local tasks until queue is empty or shut down.  Call only
378 >     * while active.
379       */
380 <    protected void onStart() {
381 <        // Allocate while starting to improve chances of thread-local
382 <        // isolation
383 <        queue = new ForkJoinTask<?>[INITIAL_QUEUE_CAPACITY];
384 <        // Initial value of seed need not be especially random but
385 <        // should differ across workers and must be nonzero
386 <        int p = poolIndex + 1;
387 <        seed = p + (p << 8) + (p << 16) + (p << 24); // spread bits
380 >    private void runLocalTasks() {
381 >        while (runState == 0) {
382 >            ForkJoinTask<?> t = locallyFifo? locallyDeqTask() : popTask();
383 >            if (t != null)
384 >                t.tryExec();
385 >            else if (base == sp)
386 >                break;
387 >        }
388      }
389  
390      /**
391 <     * Performs cleanup associated with termination of this worker
383 <     * thread.  If you override this method, you must invoke
384 <     * {@code super.onTermination} at the end of the overridden method.
391 >     * If a submission exists, tries to activate and take it.
392       *
393 <     * @param exception the exception causing this thread to abort due
387 <     * to an unrecoverable error, or {@code null} if completed normally
393 >     * @return a task, if available
394       */
395 <    protected void onTermination(Throwable exception) {
396 <        // Execute remaining local tasks unless aborting or terminating
397 <        while (exception == null && pool.isProcessingTasks() && base != sp) {
398 <            try {
399 <                ForkJoinTask<?> t = popTask();
400 <                if (t != null)
395 <                    t.quietlyExec();
396 <            } catch (Throwable ex) {
397 <                exception = ex;
395 >    private ForkJoinTask<?> pollSubmission() {
396 >        ForkJoinPool p = pool;
397 >        while (p.hasQueuedSubmissions()) {
398 >            if (active || (active = p.tryIncrementActiveCount())) {
399 >                ForkJoinTask<?> t = p.pollSubmission();
400 >                return t != null ? t : scan(); // if missed, rescan
401              }
402          }
403 <        // Cancel other tasks, transition status, notify pool, and
401 <        // propagate exception to uncaught exception handler
402 <        try {
403 <            do {} while (!tryInactivate()); // ensure inactive
404 <            cancelTasks();
405 <            runState = TERMINATED;
406 <            pool.workerTerminated(this);
407 <        } catch (Throwable ex) {        // Shouldn't ever happen
408 <            if (exception == null)      // but if so, at least rethrown
409 <                exception = ex;
410 <        } finally {
411 <            if (exception != null)
412 <                ForkJoinTask.rethrowException(exception);
413 <        }
403 >        return null;
404      }
405  
406 <    // Intrinsics-based support for queue operations.
407 <
408 <    /**
409 <     * Adds in store-order the given task at given slot of q to null.
410 <     * Caller must ensure q is non-null and index is in range.
406 >    /*
407 >     * Intrinsics-based atomic writes for queue slots. These are
408 >     * basically the same as methods in AtomicReferenceArray, but
409 >     * specialized for (1) ForkJoinTask elements (2) requirement that
410 >     * nullness and bounds checks have already been performed by
411 >     * callers and (3) effective offsets are known not to overflow
412 >     * from int to long (because of MAXIMUM_QUEUE_CAPACITY). We don't
413 >     * need a corresponding version for reads: plain array reads are OK
414 >     * because they are protected by other volatile reads and are
415 >     * confirmed by CASes.
416 >     *
417 >     * Most uses don't actually call these methods, but instead contain
418 >     * inlined forms that enable more predictable optimization.  We
419 >     * don't define the version of write used in pushTask at all, but
420 >     * instead inline there a store-fenced array slot write.
421       */
422    private static void setSlot(ForkJoinTask<?>[] q, int i,
423                                ForkJoinTask<?> t) {
424        UNSAFE.putOrderedObject(q, (i << qShift) + qBase, t);
425    }
422  
423      /**
424 <     * CAS given slot of q to null. Caller must ensure q is non-null
425 <     * and index is in range.
424 >     * CASes slot i of array q from t to null. Caller must ensure q is
425 >     * non-null and index is in range.
426       */
427 <    private static boolean casSlotNull(ForkJoinTask<?>[] q, int i,
428 <                                       ForkJoinTask<?> t) {
427 >    private static final boolean casSlotNull(ForkJoinTask<?>[] q, int i,
428 >                                             ForkJoinTask<?> t) {
429          return UNSAFE.compareAndSwapObject(q, (i << qShift) + qBase, t, null);
430      }
431  
432      /**
433 <     * Sets sp in store-order.
433 >     * Performs a volatile write of the given task at given slot of
434 >     * array q.  Caller must ensure q is non-null and index is in
435 >     * range. This method is used only during resets and backouts.
436       */
437 <    private void storeSp(int s) {
438 <        UNSAFE.putOrderedInt(this, spOffset, s);
437 >    private static final void writeSlot(ForkJoinTask<?>[] q, int i,
438 >                                              ForkJoinTask<?> t) {
439 >        UNSAFE.putObjectVolatile(q, (i << qShift) + qBase, t);
440      }
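For reference, a hedged sketch of how the (i << qShift) + qBase offset arithmetic used in these methods is typically derived and applied; it uses the common "theUnsafe" reflective access (the real class wraps this in getUnsafe()), the class name is invented, and the derivation mirrors the static initializer at the bottom of this file:

    import java.lang.reflect.Field;
    import sun.misc.Unsafe;

    class SlotOffsets {
        static final Unsafe U;
        static final long qBase;    // byte offset of element 0
        static final int qShift;    // log2 of bytes per element
        static {
            try {
                Field f = Unsafe.class.getDeclaredField("theUnsafe");
                f.setAccessible(true);
                U = (Unsafe) f.get(null);
            } catch (Exception e) { throw new Error(e); }
            qBase = U.arrayBaseOffset(Object[].class);
            int s = U.arrayIndexScale(Object[].class);
            if ((s & (s - 1)) != 0)
                throw new Error("data type scale not a power of two");
            qShift = 31 - Integer.numberOfLeadingZeros(s);
        }

        public static void main(String[] args) {
            Object[] a = new Object[8];
            U.putOrderedObject(a, (2L << qShift) + qBase, "x"); // ordered write of a[2]
            System.out.println(a[2]);                           // prints x
        }
    }

The shift-and-add form gives the JIT a single addressing expression per slot, which is part of why these inlined uses avoid the indirection costs the comment attributes to external atomic arrays.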
441  
442 <    // Main queue methods
442 >    // queue methods
443  
444      /**
445 <     * Pushes a task. Called only by current thread.
445 >     * Pushes a task. Call only from this thread.
446       *
447       * @param t the task. Caller must ensure non-null.
448       */
449      final void pushTask(ForkJoinTask<?> t) {
450 +        int s;
451          ForkJoinTask<?>[] q = queue;
452 <        int mask = q.length - 1;
453 <        int s = sp;
454 <        setSlot(q, s & mask, t);
455 <        storeSp(++s);
456 <        if ((s -= base) == 1)
452 >        int mask = q.length - 1; // implicit assert q != null
453 >        UNSAFE.putOrderedObject(q, (((s = sp++) & mask) << qShift) + qBase, t);
454 >        if ((s -= base) <= 0)
455              pool.signalWork();
456 <        else if (s >= mask)
456 >        else if (s + 1 >= mask)
457              growQueue();
458      }
459  
460      /**
461       * Tries to take a task from the base of the queue, failing if
462 <     * either empty or contended.
462 >     * empty or contended. Note: Specializations of this code appear
463 >     * in scan and scanWhileJoining.
464       *
465       * @return a task, or null if none or contended
466       */
467      final ForkJoinTask<?> deqTask() {
468          ForkJoinTask<?> t;
469          ForkJoinTask<?>[] q;
470 <        int i;
471 <        int b;
473 <        if (sp != (b = base) &&
470 >        int b, i;
471 >        if ((b = base) != sp &&
472              (q = queue) != null && // must read q after b
473              (t = q[i = (q.length - 1) & b]) != null &&
474 <            casSlotNull(q, i, t)) {
474 >            UNSAFE.compareAndSwapObject(q, (i << qShift) + qBase, t, null)) {
475              base = b + 1;
476              return t;
477          }
# Line 481 | Line 479 | public class ForkJoinWorkerThread extend
479      }
480  
481      /**
482 <     * Tries to take a task from the base of own queue, activating if
483 <     * necessary, failing only if empty. Called only by current thread.
482 >     * Tries to take a task from the base of own queue. Assumes active
483 >     * status.  Called only by current thread.
484       *
485       * @return a task, or null if none
486       */
487      final ForkJoinTask<?> locallyDeqTask() {
488 <        int b;
489 <        while (sp != (b = base)) {
490 <            if (tryActivate()) {
491 <                ForkJoinTask<?>[] q = queue;
492 <                int i = (q.length - 1) & b;
493 <                ForkJoinTask<?> t = q[i];
494 <                if (t != null && casSlotNull(q, i, t)) {
488 >        ForkJoinTask<?>[] q = queue;
489 >        if (q != null) {
490 >            ForkJoinTask<?> t;
491 >            int b, i;
492 >            while (sp != (b = base)) {
493 >                if ((t = q[i = (q.length - 1) & b]) != null &&
494 >                    UNSAFE.compareAndSwapObject(q, (i << qShift) + qBase,
495 >                                                t, null)) {
496                      base = b + 1;
497                      return t;
498                  }
# Line 503 | Line 502 | public class ForkJoinWorkerThread extend
502      }
503  
504      /**
505 <     * Returns a popped task, or null if empty. Ensures active status
506 <     * if non-null. Called only by current thread.
505 >     * Returns a popped task, or null if empty. Assumes active status.
506 >     * Called only by current thread. (Note: a specialization of this
507 >     * code appears in popWhileJoining.)
508       */
509      final ForkJoinTask<?> popTask() {
510 <        int s = sp;
511 <        while (s != base) {
512 <            if (tryActivate()) {
513 <                ForkJoinTask<?>[] q = queue;
514 <                int mask = q.length - 1;
515 <                int i = (s - 1) & mask;
516 <                ForkJoinTask<?> t = q[i];
517 <                if (t == null || !casSlotNull(q, i, t))
518 <                    break;
519 <                storeSp(s - 1);
510 >        int s;
511 >        ForkJoinTask<?>[] q;
512 >        if (base != (s = sp) && (q = queue) != null) {
513 >            int i = (q.length - 1) & --s;
514 >            ForkJoinTask<?> t = q[i];
515 >            if (t != null && UNSAFE.compareAndSwapObject
516 >                (q, (i << qShift) + qBase, t, null)) {
517 >                sp = s;
518                  return t;
519              }
520          }
# Line 524 | Line 522 | public class ForkJoinWorkerThread extend
522      }
523  
524      /**
525 <     * Specialized version of popTask to pop only if
526 <     * topmost element is the given task. Called only
527 <     * by current thread while active.
525 >     * Specialized version of popTask to pop only if topmost element
526 >     * is the given task. Called only by current thread while
527 >     * active.
528       *
529       * @param t the task. Caller must ensure non-null.
530       */
531      final boolean unpushTask(ForkJoinTask<?> t) {
532 <        ForkJoinTask<?>[] q = queue;
533 <        int mask = q.length - 1;
534 <        int s = sp - 1;
535 <        if (casSlotNull(q, s & mask, t)) {
536 <            storeSp(s);
532 >        int s;
533 >        ForkJoinTask<?>[] q;
534 >        if (base != (s = sp) && (q = queue) != null &&
535 >            UNSAFE.compareAndSwapObject
536 >            (q, (((q.length - 1) & --s) << qShift) + qBase, t, null)) {
537 >            sp = s;
538              return true;
539          }
540          return false;
# Line 575 | Line 574 | public class ForkJoinWorkerThread extend
574              ForkJoinTask<?> t = oldQ[oldIndex];
575              if (t != null && !casSlotNull(oldQ, oldIndex, t))
576                  t = null;
577 <            setSlot(newQ, b & newMask, t);
577 >            writeSlot(newQ, b & newMask, t);
578          } while (++b != bf);
579          pool.signalWork();
580      }
581  
582      /**
583 +     * Computes next value for random victim probe in scan().  Scans
584 +     * don't require a very high quality generator, but also not a
585 +     * crummy one.  Marsaglia xor-shift is cheap and works well enough.
586 +     * Note: This is manually inlined in scan()
587 +     */
588 +    private static final int xorShift(int r) {
589 +        r ^= r << 13;
590 +        r ^= r >>> 17;
591 +        return r ^ (r << 5);
592 +    }
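A small, hedged sketch of the seeding-plus-xorshift pattern this method supports: one shared java.util.Random hands out nonzero initial seeds (as in onStart above), and each probe then steps the cheap generator in a local that is written back only on exit, as the inlined form in scan() does; the class name is invented:

    import java.util.Random;

    class VictimChooser {
        static final Random seedGenerator = new Random(); // rare contention is OK
        int seed;
        VictimChooser() {
            int rs = seedGenerator.nextInt();
            seed = rs == 0 ? 1 : rs;       // xorshift state must stay nonzero
        }
        int nextVictim(int n) {            // n: length of workers array
            int r = seed;
            r ^= r << 13; r ^= r >>> 17; r ^= r << 5; // one xorshift step
            seed = r;                      // write back once, keeping scans quiet
            return r & (n - 1);            // n assumed a power of two
        }
    }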
593 +
594 +    /**
595       * Tries to steal a task from another worker. Starts at a random
596       * index of workers array, and probes workers until finding one
597       * with non-empty queue or finding that all are empty.  It
598       * randomly selects the first n probes. If these are empty, it
599 <     * resorts to a full circular traversal, which is necessary to
600 <     * accurately set active status by caller. Also restarts if pool
601 <     * events occurred since last scan, which forces refresh of
602 <     * workers array, in case barrier was associated with resize.
599 >     * resorts to a circular sweep, which is necessary to accurately
600 >     * set active status. (The circular sweep uses steps of
601 >     * approximately half the array size plus 1, to avoid bias
602 >     * stemming from leftmost packing of the array in ForkJoinPool.)
603       *
604       * This method must be both fast and quiet -- usually avoiding
605       * memory accesses that could disrupt cache sharing etc other than
606 <     * those needed to check for and take tasks. This accounts for,
607 <     * among other things, updating random seed in place without
608 <     * storing it until exit.
606 >     * those needed to check for and take tasks (or to activate if not
607 >     * already active). This accounts for, among other things,
608 >     * updating random seed in place without storing it until exit.
609       *
610       * @return a task, or null if none found
611       */
612      private ForkJoinTask<?> scan() {
613 <        ForkJoinTask<?> t = null;
614 <        int r = seed;                    // extract once to keep scan quiet
615 <        ForkJoinWorkerThread[] ws;       // refreshed on outer loop
616 <        int mask;                        // must be power 2 minus 1 and > 0
617 <        outer:do {
618 <            if ((ws = pool.workers) != null && (mask = ws.length - 1) > 0) {
619 <                int idx = r;
620 <                int probes = ~mask;      // use random index while negative
621 <                for (;;) {
622 <                    r = xorShift(r);     // update random seed
623 <                    ForkJoinWorkerThread v = ws[mask & idx];
624 <                    if (v == null || v.sp == v.base) {
625 <                        if (probes <= mask)
626 <                            idx = (probes++ < 0) ? r : (idx + 1);
627 <                        else
628 <                            break;
613 >        ForkJoinPool p = pool;
614 >        ForkJoinWorkerThread[] ws;        // worker array
615 >        int n;                            // upper bound of #workers
616 >        if ((ws = p.workers) != null && (n = ws.length) > 1) {
617 >            boolean canSteal = active;    // shadow active status
618 >            int r = seed;                 // extract seed once
619 >            int mask = n - 1;
620 >            int j = -n;                   // loop counter
621 >            int k = r;                    // worker index, random if j < 0
622 >            for (;;) {
623 >                ForkJoinWorkerThread v = ws[k & mask];
624 >                r ^= r << 13; r ^= r >>> 17; r ^= r << 5; // inline xorshift
625 >                if (v != null && v.base != v.sp) {
626 >                    int b, i;             // inline specialized deqTask
627 >                    ForkJoinTask<?>[] q;
628 >                    ForkJoinTask<?> t;
629 >                    if ((canSteal ||      // ensure active status
630 >                         (canSteal = active = p.tryIncrementActiveCount())) &&
631 >                        (q = v.queue) != null &&
632 >                        (t = q[i = (q.length - 1) & (b = v.base)]) != null &&
633 >                        UNSAFE.compareAndSwapObject
634 >                        (q, (i << qShift) + qBase, t, null)) {
635 >                        v.base = b + 1;
636 >                        seed = r;
637 >                        ++stealCount;
638 >                        return t;
639                      }
640 <                    else if (!tryActivate() || (t = v.deqTask()) == null)
641 <                        continue outer;  // restart on contention
621 <                    else
622 <                        break outer;
640 >                    j = -n;
641 >                    k = r;                // restart on contention
642                  }
643 +                else if (++j <= 0)
644 +                    k = r;
645 +                else if (j <= n)
646 +                    k += (n >>> 1) | 1;
647 +                else
648 +                    break;
649              }
650 <        } while (pool.hasNewSyncEvent(this)); // retry on pool events
651 <        seed = r;
627 <        return t;
650 >        }
651 >        return null;
652      }
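The circular sweep's step of (n >>> 1) | 1 is odd, hence coprime with the power-of-two array length, so repeatedly adding it modulo n visits every index exactly once. A quick, self-contained check of that property (illustrative only):

    class SweepCheck {
        public static void main(String[] args) {
            for (int n = 2; n <= (1 << 16); n <<= 1) {
                int step = (n >>> 1) | 1;      // same step as scan()
                boolean[] seen = new boolean[n];
                int k = 12345;                 // arbitrary start index
                for (int j = 0; j < n; ++j) {
                    seen[k & (n - 1)] = true;  // mask as in scan()
                    k += step;
                }
                for (boolean b : seen)
                    if (!b) throw new AssertionError("missed slot, n=" + n);
            }
            System.out.println("full coverage at every power-of-two size");
        }
    }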
653  
654 +    // Run State management
655 +
656 +    // status check methods used mainly by ForkJoinPool
657 +    final boolean isTerminating() { return (runState & TERMINATING) != 0; }
658 +    final boolean isTerminated()  { return (runState & TERMINATED) != 0; }
659 +    final boolean isSuspended()   { return (runState & SUSPENDED) != 0; }
660 +    final boolean isTrimmed()     { return (runState & TRIMMED) != 0; }
661 +
662      /**
663 <     * Gets and removes a local or stolen task.
632 <     *
633 <     * @return a task, if available
663 >     * Sets state to TERMINATING, also resuming if suspended.
664       */
665 <    final ForkJoinTask<?> pollTask() {
666 <        ForkJoinTask<?> t = locallyFifo ? locallyDeqTask() : popTask();
667 <        if (t == null && (t = scan()) != null)
668 <            ++stealCount;
669 <        return t;
665 >    final void shutdown() {
666 >        for (;;) {
667 >            int s = runState;
668 >            if ((s & SUSPENDED) != 0) { // kill and wakeup if suspended
669 >                if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
670 >                                             (s & ~SUSPENDED) |
671 >                                             (TRIMMED|TERMINATING))) {
672 >                    LockSupport.unpark(this);
673 >                    break;
674 >                }
675 >            }
676 >            else if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
677 >                                              s | TERMINATING))
678 >                break;
679 >        }
680      }
681  
682      /**
683 <     * Gets a local task.
683 >     * Sets state to TERMINATED. Called only by this thread.
684 >     */
685 >    private void setTerminated() {
686 >        int s;
687 >        do {} while (!UNSAFE.compareAndSwapInt(this, runStateOffset,
688 >                                               s = runState,
689 >                                               s | (TERMINATING|TERMINATED)));
690 >    }
691 >
692 >    /**
693 >     * Instrumented version of park. Also used by ForkJoinPool.awaitEvent
694 >     */
695 >    final void doPark() {
696 >        ++parkCount;
697 >        LockSupport.park(this);
698 >    }
699 >
700 >    /**
701 >     * If suspended, tries to set status to unsuspended.
702 >     * Caller must unpark to actually resume
703       *
704 <     * @return a task, if available
704 >     * @return true if successful
705       */
706 <    final ForkJoinTask<?> pollLocalTask() {
707 <        return locallyFifo ? locallyDeqTask() : popTask();
706 >    final boolean tryUnsuspend() {
707 >        int s;
708 >        return (((s = runState) & SUSPENDED) != 0 &&
709 >                UNSAFE.compareAndSwapInt(this, runStateOffset, s,
710 >                                         s & ~SUSPENDED));
711      }
712  
713      /**
714 <     * Returns a pool submission, if one exists, activating first.
714 >     * Sets suspended status and blocks as spare until resumed,
715 >     * shutdown, or timed out.
716       *
717 <     * @return a submission, if available
717 >     * @return false if trimmed
718       */
719 <    private ForkJoinTask<?> pollSubmission() {
719 >    final boolean suspendAsSpare() {
720 >        for (;;) {               // set suspended unless terminating
721 >            int s = runState;
722 >            if ((s & TERMINATING) != 0) { // must kill
723 >                if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
724 >                                             s | (TRIMMED | TERMINATING)))
725 >                    return false;
726 >            }
727 >            else if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
728 >                                              s | SUSPENDED))
729 >                break;
730 >        }
731 >        lastEventCount = 0;      // reset upon resume
732          ForkJoinPool p = pool;
733 <        while (p.hasQueuedSubmissions()) {
734 <            ForkJoinTask<?> t;
735 <            if (tryActivate() && (t = p.pollSubmission()) != null)
736 <                return t;
733 >        p.releaseWaiters();      // help others progress
734 >        p.accumulateStealCount(this);
735 >        interrupted();           // clear/ignore interrupts
736 >        if (poolIndex < p.getParallelism()) { // untimed wait
737 >            while ((runState & SUSPENDED) != 0)
738 >                doPark();
739 >            return true;
740          }
741 <        return null;
741 >        return timedSuspend();   // timed wait if apparently non-core
742 >    }
743 >
744 >    /**
745 >     * Blocks as spare until resumed or timed out
746 >     * @return false if trimmed
747 >     */
748 >    private boolean timedSuspend() {
749 >        long nanos = SPARE_KEEPALIVE_NANOS;
750 >        long startTime = System.nanoTime();
751 >        while ((runState & SUSPENDED) != 0) {
752 >            ++parkCount;
753 >            if ((nanos -= (System.nanoTime() - startTime)) > 0)
754 >                LockSupport.parkNanos(this, nanos);
755 >            else { // try to trim on timeout
756 >                int s = runState;
757 >                if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
758 >                                             (s & ~SUSPENDED) |
759 >                                             (TRIMMED|TERMINATING)))
760 >                    return false;
761 >            }
762 >        }
763 >        return true;
764 >    }
765 >
766 >    // Misc support methods for ForkJoinPool
767 >
768 >    /**
769 >     * Returns an estimate of the number of tasks in the queue.  Also
770 >     * used by ForkJoinTask.
771 >     */
772 >    final int getQueueSize() {
773 >        return -base + sp;
774      }
775  
776 <    // Methods accessed only by Pool
776 >    /**
777 >     * Sets locallyFifo mode. Called only by ForkJoinPool.
778 >     */
779 >    final void setAsyncMode(boolean async) {
780 >        locallyFifo = async;
781 >    }
782  
783      /**
784       * Removes and cancels all tasks in queue.  Can be called from any
785       * thread.
786       */
787      final void cancelTasks() {
788 <        ForkJoinTask<?> t;
789 <        while (base != sp && (t = deqTask()) != null)
790 <            t.cancelIgnoringExceptions();
788 >        while (base != sp) {
789 >            ForkJoinTask<?> t = deqTask();
790 >            if (t != null)
791 >                t.cancelIgnoringExceptions();
792 >        }
793      }
794  
795      /**
# Line 682 | Line 799 | public class ForkJoinWorkerThread extend
799       */
800      final int drainTasksTo(Collection<? super ForkJoinTask<?>> c) {
801          int n = 0;
802 <        ForkJoinTask<?> t;
803 <        while (base != sp && (t = deqTask()) != null) {
804 <            c.add(t);
805 <            ++n;
802 >        while (base != sp) {
803 >            ForkJoinTask<?> t = deqTask();
804 >            if (t != null) {
805 >                c.add(t);
806 >                ++n;
807 >            }
808          }
809          return n;
810      }
811  
812 +    // Support methods for ForkJoinTask
813 +
814      /**
815 <     * Gets and clears steal count for accumulation by pool.  Called
816 <     * only when known to be idle (in pool.sync and termination).
815 >     * Returns an estimate of the number of tasks, offset by a
816 >     * function of number of idle workers.
817 >     *
818 >     * This method provides a cheap heuristic guide for task
819 >     * partitioning when programmers, frameworks, tools, or languages
820 >     * have little or no idea about task granularity.  In essence by
821 >     * offering this method, we ask users only about tradeoffs in
822 >     * overhead vs expected throughput and its variance, rather than
823 >     * how finely to partition tasks.
824 >     *
825 >     * In a steady state strict (tree-structured) computation, each
826 >     * thread makes available for stealing enough tasks for other
827 >     * threads to remain active. Inductively, if all threads play by
828 >     * the same rules, each thread should make available only a
829 >     * constant number of tasks.
830 >     *
831 >     * The minimum useful constant is just 1. But using a value of 1
832 >     * would require immediate replenishment upon each steal to
833 >     * maintain enough tasks, which is infeasible.  Further,
834 >     * partitionings/granularities of offered tasks should minimize
835 >     * steal rates, which in general means that threads nearer the top
836 >     * of computation tree should generate more than those nearer the
837 >     * bottom. In perfect steady state, each thread is at
838 >     * approximately the same level of computation tree. However,
839 >     * producing extra tasks amortizes the uncertainty of progress and
840 >     * diffusion assumptions.
841 >     *
842 >     * So, users will want to use values larger, but not much larger
843 >     * than 1 to both smooth over transient shortages and hedge
844 >     * against uneven progress, as traded off against the cost of
845 >     * extra task overhead. We leave the user to pick a threshold
846 >     * value to compare with the results of this call to guide
847 >     * decisions, but recommend values such as 3.
848 >     *
849 >     * When all threads are active, it is on average OK to estimate
850 >     * surplus strictly locally. In steady-state, if one thread is
851 >     * maintaining say 2 surplus tasks, then so are others. So we can
852 >     * just use estimated queue length (although note that (sp - base)
853 >     * can be an overestimate because of stealers lagging increments
854 >     * of base).  However, this strategy alone leads to serious
855 >     * mis-estimates in some non-steady-state conditions (ramp-up,
856 >     * ramp-down, other stalls). We can detect many of these by
857 >     * further considering the number of "idle" threads, that are
858 >     * known to have zero queued tasks, so compensate by a factor of
859 >     * (#idle/#active) threads.
860       */
861 <    final int getAndClearStealCount() {
862 <        int sc = stealCount;
699 <        stealCount = 0;
700 <        return sc;
861 >    final int getEstimatedSurplusTaskCount() {
862 >        return sp - base - pool.idlePerActive();
863      }
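A hedged sketch of the threshold use the comment recommends (values such as 3), written against the public wrapper ForkJoinTask.getSurplusQueuedTaskCount(), which in this era of the sources forwards to this estimate when called from a worker thread; the task class and size cutoff are invented for illustration:

    import jsr166y.ForkJoinTask;
    import jsr166y.RecursiveTask;

    class SumTask extends RecursiveTask<Long> {
        final long[] a; final int lo, hi;
        SumTask(long[] a, int lo, int hi) { this.a = a; this.lo = lo; this.hi = hi; }
        protected Long compute() {
            // Stop splitting when small, or when this worker already has
            // more than 3 surplus queued tasks available to thieves.
            if (hi - lo < 1024 || ForkJoinTask.getSurplusQueuedTaskCount() > 3) {
                long s = 0;
                for (int i = lo; i < hi; ++i) s += a[i];
                return s;
            }
            int mid = (lo + hi) >>> 1;
            SumTask right = new SumTask(a, mid, hi);
            right.fork();                          // offer work to thieves
            long left = new SumTask(a, lo, mid).compute();
            return left + right.join();
        }
    }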
864  
865      /**
866 <     * Returns {@code true} if at least one worker in the given array
705 <     * appears to have at least one queued task.
866 >     * Gets and removes a local task.
867       *
868 <     * @param ws array of workers
868 >     * @return a task, if available
869       */
870 <    static boolean hasQueuedTasks(ForkJoinWorkerThread[] ws) {
871 <        if (ws != null) {
872 <            int len = ws.length;
873 <            for (int j = 0; j < 2; ++j) { // need two passes for clean sweep
713 <                for (int i = 0; i < len; ++i) {
714 <                    ForkJoinWorkerThread w = ws[i];
715 <                    if (w != null && w.sp != w.base)
716 <                        return true;
717 <                }
718 <            }
870 >    final ForkJoinTask<?> pollLocalTask() {
871 >        while (base != sp) {
872 >            if (active || (active = pool.tryIncrementActiveCount()))
873 >                return locallyFifo? locallyDeqTask() : popTask();
874          }
875 <        return false;
875 >        return null;
876      }
877  
878 <    // Support methods for ForkJoinTask
878 >    /**
879 >     * Gets and removes a local or stolen task.
880 >     *
881 >     * @return a task, if available
882 >     */
883 >    final ForkJoinTask<?> pollTask() {
884 >        ForkJoinTask<?> t;
885 >        return (t = pollLocalTask()) != null ? t : scan();
886 >    }
887  
888      /**
889 <     * Returns an estimate of the number of tasks in the queue.
889 >     * Executes or processes other tasks while awaiting the given task.
890 >     * @return task completion status
891       */
892 <    final int getQueueSize() {
893 <        // suppress momentarily negative values
894 <        return Math.max(0, sp - base);
892 >    final int execWhileJoining(ForkJoinTask<?> joinMe) {
893 >        int s;
894 >        while ((s = joinMe.status) >= 0) {
895 >            ForkJoinTask<?> t = base != sp?
896 >                popWhileJoining(joinMe) :
897 >                scanWhileJoining(joinMe);
898 >            if (t != null)
899 >                t.tryExec();
900 >        }
901 >        return s;
902      }
903  
904      /**
905 <     * Returns an estimate of the number of tasks, offset by a
906 <     * function of number of idle workers.
905 >     * Returns a stolen task, if available, unless joinMe is done.
906 >     *
907 >     * This method is intrinsically nonmodular. To maintain the
908 >     * property that tasks are never stolen if the awaited task is
909 >     * ready, we must interleave mechanics of scan with status
910 >     * checks. We rely here on the commit points of deq that allow us
911 >     * to cancel a steal even after CASing slot to null, but before
912 >     * adjusting base index: If, after the CAS, we see that joinMe is
913 >     * ready, we can back out by placing the task back into the slot,
914 >     * without adjusting index. The loop is otherwise a variant of the
915 >     * one in scan().
916 >     *
917       */
918 <    final int getEstimatedSurplusTaskCount() {
919 <        // The halving approximates weighting idle vs non-idle workers
920 <        return (sp - base) - (pool.getIdleThreadCount() >>> 1);
918 >    private ForkJoinTask<?> scanWhileJoining(ForkJoinTask<?> joinMe) {
919 >        int r = seed;
920 >        ForkJoinPool p = pool;
921 >        ForkJoinWorkerThread[] ws;
922 >        int n;
923 >        outer:while ((ws = p.workers) != null && (n = ws.length) > 1) {
924 >            int mask = n - 1;
925 >            int k = r;
926 >            boolean contended = false; // to retry loop if deq contends
927 >            for (int j = -n; j <= n; ++j) {
928 >                if (joinMe.status < 0)
929 >                    break outer;
930 >                int b;
931 >                ForkJoinTask<?>[] q;
932 >                ForkJoinWorkerThread v = ws[k & mask];
933 >                r ^= r << 13; r ^= r >>> 17; r ^= r << 5; // xorshift
934 >                if (v != null && (b=v.base) != v.sp && (q=v.queue) != null) {
935 >                    int i = (q.length - 1) & b;
936 >                    ForkJoinTask<?> t = q[i];
937 >                    if (t != null && UNSAFE.compareAndSwapObject
938 >                        (q, (i << qShift) + qBase, t, null)) {
939 >                        if (joinMe.status >= 0) {
940 >                            v.base = b + 1;
941 >                            seed = r;
942 >                            ++stealCount;
943 >                            return t;
944 >                        }
945 >                        UNSAFE.putObjectVolatile(q, (i<<qShift)+qBase, t);
946 >                        break outer; // back out
947 >                    }
948 >                    contended = true;
949 >                }
950 >                k = j < 0 ? r : (k + ((n >>> 1) | 1));
951 >            }
952 >            if (!contended && p.tryAwaitBusyJoin(joinMe))
953 >                break;
954 >        }
955 >        return null;
956      }
957  
958      /**
959 <     * Scans, returning early if joinMe done.
959 >     * Version of popTask with join checks surrounding extraction.
960 >     * Uses the same backout strategy as helpJoinTask. Note that
961 >     * we ignore locallyFifo flag for local tasks here since helping
962 >     * joins only make sense in LIFO mode.
963 >     *
964 >     * @return a popped task, if available, unless joinMe is done
965       */
966 <    final ForkJoinTask<?> scanWhileJoining(ForkJoinTask<?> joinMe) {
967 <        ForkJoinTask<?> t = pollTask();
968 <        if (t != null && joinMe.status < 0 && sp == base) {
969 <            pushTask(t); // unsteal if done and this task would be stealable
970 <            t = null;
966 >    private ForkJoinTask<?> popWhileJoining(ForkJoinTask<?> joinMe) {
967 >        int s;
968 >        ForkJoinTask<?>[] q;
969 >        while ((s = sp) != base && (q = queue) != null && joinMe.status >= 0) {
970 >            int i = (q.length - 1) & --s;
971 >            ForkJoinTask<?> t = q[i];
972 >            if (t != null && UNSAFE.compareAndSwapObject
973 >                (q, (i << qShift) + qBase, t, null)) {
974 >                if (joinMe.status >= 0) {
975 >                    sp = s;
976 >                    return t;
977 >                }
978 >                UNSAFE.putObjectVolatile(q, (i << qShift) + qBase, t);
979 >                break;  // back out
980 >            }
981          }
982 <        return t;
982 >        return null;
983      }
984  
985      /**
# Line 756 | Line 987 | public class ForkJoinWorkerThread extend
987       */
988      final void helpQuiescePool() {
989          for (;;) {
990 <            ForkJoinTask<?> t = pollTask();
991 <            if (t != null)
992 <                t.quietlyExec();
993 <            else if (tryInactivate() && pool.isQuiescent())
994 <                break;
990 >            ForkJoinTask<?> t = pollLocalTask();
991 >            if (t != null || (t = scan()) != null)
992 >                t.tryExec();
993 >            else {
994 >                ForkJoinPool p = pool;
995 >                if (active) {
996 >                    active = false; // inactivate
997 >                    do {} while (!p.tryDecrementActiveCount());
998 >                }
999 >                if (p.isQuiescent()) {
1000 >                    active = true; // re-activate
1001 >                    do {} while (!p.tryIncrementActiveCount());
1002 >                    return;
1003 >                }
1004 >            }
1005          }
765        do {} while (!tryActivate()); // re-activate on exit
1006      }
1007  
1008      // Unsafe mechanics
1009  
1010      private static final sun.misc.Unsafe UNSAFE = getUnsafe();
771    private static final long spOffset =
772        objectFieldOffset("sp", ForkJoinWorkerThread.class);
1011      private static final long runStateOffset =
1012          objectFieldOffset("runState", ForkJoinWorkerThread.class);
1013 <    private static final long qBase;
1013 >    private static final long qBase =
1014 >        UNSAFE.arrayBaseOffset(ForkJoinTask[].class);
1015      private static final int qShift;
1016  
1017      static {
779        qBase = UNSAFE.arrayBaseOffset(ForkJoinTask[].class);
1018          int s = UNSAFE.arrayIndexScale(ForkJoinTask[].class);
1019          if ((s & (s-1)) != 0)
1020              throw new Error("data type scale not a power of two");

Diff Legend

  Removed lines (shown with no marker)
+ Added lines
< Changed lines (old revision 1.28)
> Changed lines (new revision 1.33)