 44     * of tasks. To accomplish this, we shift the CAS arbitrating pop
 45     * vs deq (steal) from being on the indices ("base" and "sp") to
 46     * the slots themselves (mainly via method "casSlotNull()"). So,
 47 <   * both a successful pop and deq mainly entail CAS'ing a nonnull
 47 >   * both a successful pop and deq mainly entail CAS'ing a non-null
 48     * slot to null. Because we rely on CASes of references, we do
 49     * not need tag bits on base or sp. They are simple ints as used
 50     * in any circular array-based queue (see for example ArrayDeque).
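The comment above (file lines 44-50) describes the central trick: arbitration between the owner's pop and a thief's deq happens on the slot itself, not on the indices, so base and sp can stay plain ints. The following is a minimal, self-contained sketch of that idea; it uses AtomicReferenceArray purely for illustration (the real code operates on a plain array via Unsafe, as the later comment explains), and the names MiniDeque and trySteal are made up here.

    import java.util.concurrent.atomic.AtomicReferenceArray;

    // Illustrative sketch only; not the real jsr166 implementation.
    final class MiniDeque<T> {
        final AtomicReferenceArray<T> slots = new AtomicReferenceArray<>(64); // power-of-two capacity
        volatile int base;   // next slot to steal from; plain int, no tag bits
        volatile int sp;     // next slot to push into

        // A thief "deq": read the base slot and claim it by CASing the
        // non-null reference to null; only the winner then advances base.
        T trySteal() {
            int b = base;
            if (b == sp)
                return null;                          // appears empty
            int i = b & (slots.length() - 1);
            T t = slots.get(i);
            if (t != null && slots.compareAndSet(i, t, null)) {
                base = b + 1;                         // safe: slot already claimed
                return t;
            }
            return null;                              // lost the race; caller retries elsewhere
        }
    }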
 56     * considered individually, is not wait-free. One thief cannot
 57     * successfully continue until another in-progress one (or, if
 58     * previously empty, a push) completes. However, in the
 59 <   * aggregate, we ensure at least probablistic non-blockingness. If
 59 >   * aggregate, we ensure at least probabilistic non-blockingness. If
 60     * an attempted steal fails, a thief always chooses a different
 61     * random victim target to try next. So, in order for one thief to
 62     * progress, it suffices for any in-progress deq or new push on
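The "probabilistic non-blockingness" described above comes from the pool-level retry policy rather than from the deque itself: a thief that fails simply retargets a different, randomly chosen victim. A simplified, hypothetical scan loop (the names scanForWork and victims are invented for illustration; a failed or empty steal is modeled as a null result):

    import java.util.concurrent.ThreadLocalRandom;
    import java.util.function.Supplier;

    // Illustrative sketch: after a failed steal, try a different random victim,
    // so one stalled queue cannot block overall progress.
    final class ScanSketch {
        static <T> T scanForWork(Supplier<T>[] victims, int retries) {
            ThreadLocalRandom rnd = ThreadLocalRandom.current();
            for (int k = 0; k < retries; ++k) {
                T task = victims[rnd.nextInt(victims.length)].get(); // null == failed/empty
                if (task != null)
                    return task;
            }
            return null;    // caller may back off, help elsewhere, or park
        }
    }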
 75     * push) require store order and CASes (in pop and deq) require
 76     * (volatile) CAS semantics. Since these combinations aren't
 77     * supported using ordinary volatiles, the only way to accomplish
 78 <   * these effciently is to use direct Unsafe calls. (Using external
 78 >   * these efficiently is to use direct Unsafe calls. (Using external
 79     * AtomicIntegers and AtomicReferenceArrays for the indices and
 80     * array is significantly slower because of memory locality and
 81     * indirection effects.) Further, performance on most platforms is
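The point of the paragraph above is that a plain array plus two ints needs non-standard memory operations: ordered (store-fenced) writes for push and full CAS for pop/deq, applied directly to array elements. A sketch of how that can look with sun.misc.Unsafe on the JDK 6/7-era VMs this code targets; arrayBaseOffset, arrayIndexScale, putOrderedObject, and compareAndSwapObject are real Unsafe methods, while the class and the reflective grab of theUnsafe (the usual workaround outside the boot classpath) are illustrative only.

    import java.lang.reflect.Field;
    import sun.misc.Unsafe;

    // Illustrative sketch of ordered stores and CAS on raw array slots.
    final class UnsafeSlots {
        private static final Unsafe U;
        private static final long BASE;    // offset of element 0
        private static final int SHIFT;    // log2 of element size
        static {
            try {
                Field f = Unsafe.class.getDeclaredField("theUnsafe");
                f.setAccessible(true);
                U = (Unsafe) f.get(null);
                BASE = U.arrayBaseOffset(Object[].class);
                int scale = U.arrayIndexScale(Object[].class);
                SHIFT = 31 - Integer.numberOfLeadingZeros(scale);
            } catch (ReflectiveOperationException e) {
                throw new Error(e);
            }
        }

        static void setSlot(Object[] q, int i, Object t) {        // store-ordered write
            U.putOrderedObject(q, BASE + ((long) i << SHIFT), t);
        }

        static boolean casSlotNull(Object[] q, int i, Object t) { // volatile CAS semantics
            return U.compareAndSwapObject(q, BASE + ((long) i << SHIFT), t, null);
        }
    }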
239
240     /**
241      * Establishes local first-in-first-out scheduling mode for forked
242 <   * tasks that are never joined.
242 >   * tasks that are never joined.
243      * @param async if true, use locally FIFO scheduling
244      */
245     void setAsyncMode(boolean async) {
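For callers, the practical effect of async mode is that forked, never-joined (event-style) tasks are serviced in FIFO order. In the released java.util.concurrent API the flag is normally supplied when constructing the pool rather than through this package-private hook; a small usage sketch under that assumption (the class AsyncModeExample is hypothetical):

    import java.util.concurrent.ForkJoinPool;

    // Sketch: a pool in async (locally FIFO) mode for fire-and-forget tasks.
    public class AsyncModeExample {
        public static void main(String[] args) throws InterruptedException {
            ForkJoinPool pool = new ForkJoinPool(
                Runtime.getRuntime().availableProcessors(),
                ForkJoinPool.defaultForkJoinWorkerThreadFactory,
                null,      // default uncaught-exception handling
                true);     // asyncMode: local FIFO scheduling for never-joined tasks
            for (int i = 0; i < 4; ++i) {
                int id = i;
                pool.execute(() -> System.out.println("event task " + id));
            }
            pool.shutdown();
            pool.awaitTermination(10, java.util.concurrent.TimeUnit.SECONDS);
        }
    }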
400     // Intrinsics-based support for queue operations.
401
402     /**
403 <   * Add in store-order the given task at given slot of q to
404 <   * null. Caller must ensure q is nonnull and index is in range.
403 >   * Adds in store-order the given task at given slot of q to null.
404 >   * Caller must ensure q is non-null and index is in range.
405      */
406     private static void setSlot(ForkJoinTask<?>[] q, int i,
407                                 ForkJoinTask<?> t){
409     }
410
411     /**
412 <   * CAS given slot of q to null. Caller must ensure q is nonnull
412 >   * CAS given slot of q to null. Caller must ensure q is non-null
413      * and index is in range.
414      */
415     private static boolean casSlotNull(ForkJoinTask<?>[] q, int i,
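For readers who want the memory-ordering intent of setSlot and casSlotNull without the Unsafe machinery, the comment at file lines 78-80 above already names the (slower) AtomicReferenceArray alternative; its semantic stand-ins would look roughly like this. Illustrative only; the class name SlotOps is invented.

    import java.util.concurrent.atomic.AtomicReferenceArray;

    // Semantic stand-ins for setSlot / casSlotNull, per the comment about
    // AtomicReferenceArrays earlier in the file. Illustrative only.
    final class SlotOps {
        static <T> void setSlot(AtomicReferenceArray<T> q, int i, T t) {
            q.lazySet(i, t);                        // ordered store, no full fence
        }
        static <T> boolean casSlotNull(AtomicReferenceArray<T> q, int i, T t) {
            return q.compareAndSet(i, t, null);     // volatile CAS semantics
        }
    }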
428
429     /**
430      * Pushes a task. Called only by current thread.
431 <   * @param t the task. Caller must ensure nonnull
431 >   * @param t the task. Caller must ensure non-null.
432      */
433     final void pushTask(ForkJoinTask<?> t) {
434         ForkJoinTask<?>[] q = queue;
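pushTask's body is not shown in this hunk. As a rough, hypothetical illustration of the ordering it needs (the slot is written with an ordered store before the advanced sp becomes visible), here is a standalone sketch in the same AtomicReferenceArray terms as the earlier examples; PushSketch and pushSlot are invented names, and growth and signalling are omitted.

    import java.util.concurrent.atomic.AtomicReferenceArray;

    // Illustrative only: store the task into the slot before the new sp is
    // published, so a thief that reads the advanced index sees a non-null slot.
    final class PushSketch {
        static <T> int pushSlot(AtomicReferenceArray<T> slots, int sp, T t) {
            slots.lazySet(sp & (slots.length() - 1), t);  // ordered store of the task
            return sp + 1;                                // caller publishes this as the new sp
        }
    }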
464
465     /**
466      * Returns a popped task, or null if empty. Ensures active status
467 <   * if nonnull. Called only by current thread.
467 >   * if non-null. Called only by current thread.
468      */
469     final ForkJoinTask<?> popTask() {
470         int s = sp;
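Again the body is elided in this hunk; the essential step, per the class comment, is that a pop only succeeds by CASing the topmost non-null slot to null, which is what arbitrates against a thief taking the same last element. A simplified, hypothetical sketch (PopSketch and popSlot are invented names):

    import java.util.concurrent.atomic.AtomicReferenceArray;

    // Illustrative only: claim the topmost slot by CAS; if the CAS fails, a
    // thief already took the task and the pop simply reports empty.
    final class PopSketch {
        static <T> T popSlot(AtomicReferenceArray<T> slots, int base, int sp) {
            if (sp == base)
                return null;                              // nothing to pop
            int i = (sp - 1) & (slots.length() - 1);      // topmost occupied slot
            T t = slots.get(i);
            if (t != null && slots.compareAndSet(i, t, null))
                return t;                                 // caller then stores sp - 1
            return null;                                  // lost the race to a thief
        }
    }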
487      * Specialized version of popTask to pop only if
488      * topmost element is the given task. Called only
489      * by current thread while active.
490 <   * @param t the task. Caller must ensure nonnull
490 >   * @param t the task. Caller must ensure non-null
491      */
492     final boolean unpushTask(ForkJoinTask<?> t) {
493         ForkJoinTask<?>[] q = queue;
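unpushTask follows the same pattern but is conditional on identity: it only removes the task if that exact task is still the topmost element, which makes it suitable for backing out a just-forked task. A simplified sketch under the same illustrative conventions as above (UnpushSketch and unpushSlot are invented names):

    import java.util.concurrent.atomic.AtomicReferenceArray;

    // Illustrative only: succeed only if the given task is still on top,
    // i.e. the CAS of exactly that reference to null wins.
    final class UnpushSketch {
        static <T> boolean unpushSlot(AtomicReferenceArray<T> slots, int sp, T t) {
            int i = (sp - 1) & (slots.length() - 1);      // topmost slot
            if (slots.compareAndSet(i, t, null))
                return true;                              // caller then stores sp - 1
            return false;                                 // already popped or stolen
        }
    }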