   1      /*
   2       * Written by Doug Lea with assistance from members of JCP JSR-166
   3       * Expert Group and released to the public domain, as explained at
   4  <    * http://creativecommons.org/licenses/publicdomain
   4  >    * http://creativecommons.org/publicdomain/zero/1.0/
   5       */
   6
   7      package jsr166y;
   8
   9  -   import java.util.concurrent.*;
  10  -
   9      import java.util.AbstractQueue;
  10      import java.util.Collection;
  13  -   import java.util.ConcurrentModificationException;
  11      import java.util.Iterator;
  12      import java.util.NoSuchElementException;
  13      import java.util.Queue;
  14  +   import java.util.concurrent.TimeUnit;
  15      import java.util.concurrent.locks.LockSupport;
  16  +
  17      /**
  18       * An unbounded {@link TransferQueue} based on linked nodes.
  19       * This queue orders elements FIFO (first-in-first-out) with respect
  22       * producer. The <em>tail</em> of the queue is that element that has
  23       * been on the queue the shortest time for some producer.
  24       *
  25  <    * <p>Beware that, unlike in most collections, the {@code size}
  26  <    * method is <em>NOT</em> a constant-time operation. Because of the
  25  >    * <p>Beware that, unlike in most collections, the {@code size} method
  26  >    * is <em>NOT</em> a constant-time operation. Because of the
  27       * asynchronous nature of these queues, determining the current number
  28  <    * of elements requires a traversal of the elements.
  28  >    * of elements requires a traversal of the elements, and so may report
  29  >    * inaccurate results if this collection is modified during traversal.
  30  >    * Additionally, the bulk operations {@code addAll},
  31  >    * {@code removeAll}, {@code retainAll}, {@code containsAll},
  32  >    * {@code equals}, and {@code toArray} are <em>not</em> guaranteed
  33  >    * to be performed atomically. For example, an iterator operating
  34  >    * concurrently with an {@code addAll} operation might view only some
  35  >    * of the added elements.
  36       *
  37       * <p>This class and its iterator implement all of the
  38       * <em>optional</em> methods of the {@link Collection} and {@link
  76       *
  77       * A FIFO dual queue may be implemented using a variation of the
  78       * Michael & Scott (M&S) lock-free queue algorithm
  79  <    * (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf).
  79  >    * (http://www.cs.rochester.edu/~scott/papers/1996_PODC_queues.pdf).
  80       * It maintains two pointer fields, "head", pointing to a
  81       * (matched) node that in turn points to the first actual
  82       * (unmatched) queue node (or null if empty); and "tail" that
 148       * that using very small constants in the range of 1-3 work best
 149       * over a range of platforms. Larger values introduce increasing
 150       * costs of cache misses and risks of long traversal chains, while
 151  <    * smaller values increase CAS contentiona and overhead.
 151  >    * smaller values increase CAS contention and overhead.
 152       *
 153       * Dual queues with slack differ from plain M&S dual queues by
 154       * virtue of only sometimes updating head or tail pointers when
 167       * targets. Even when using very small slack values, this
 168       * approach works well for dual queues because it allows all
 169       * operations up to the point of matching or appending an item
 170  <    * (hence potentially releasing another thread) to be read-only,
 171  <    * thus not introducing any further contention. As described
 172  <    * below, we implement this by performing slack maintenance
 173  <    * retries only after these points.
 170  >    * (hence potentially allowing progress by another thread) to be
 171  >    * read-only, thus not introducing any further contention. As
 172  >    * described below, we implement this by performing slack
 173  >    * maintenance retries only after these points.
 174       *
 175       * As an accompaniment to such techniques, traversal overhead can
 176       * be further reduced without increasing contention of head
 177  <    * pointer updates. During traversals, threads may sometimes
 178  <    * shortcut the "next" link path from the current "head" node to
 179  <    * be closer to the currently known first unmatched node. Again,
 180  <    * this may be triggered with using thresholds or randomization.
 177  >    * pointer updates: Threads may sometimes shortcut the "next" link
 178  >    * path from the current "head" node to be closer to the currently
 179  >    * known first unmatched node, and similarly for tail. Again, this
 180  >    * may be triggered with using thresholds or randomization.
 181       *
 182       * These ideas must be further extended to avoid unbounded amounts
 183       * of costly-to-reclaim garbage caused by the sequential "next"
 205       * mechanics because an update may leave head at a detached node.
 206       * And while direct writes are possible for tail updates, they
 207       * increase the risk of long retraversals, and hence long garbage
 208  <    * chains which can be much more costly than is worthwhile
 208  >    * chains, which can be much more costly than is worthwhile
 209       * considering that the cost difference of performing a CAS vs
 210       * write is smaller when they are not triggered on each operation
 211       * (especially considering that writes and CASes equally require
 212       * additional GC bookkeeping ("write barriers") that are sometimes
 213       * more costly than the writes themselves because of contention).
 214       *
 209  -    * Removal of interior nodes (due to timed out or interrupted
 210  -    * waits, or calls to remove or Iterator.remove) uses a scheme
 211  -    * roughly similar to that in Scherer, Lea, and Scott's
 212  -    * SynchronousQueue. Given a predecessor, we can unsplice any node
 213  -    * except the (actual) tail of the queue. To avoid build-up of
 214  -    * cancelled trailing nodes, upon a request to remove a trailing
 215  -    * node, it is placed in field "cleanMe" to be unspliced upon the
 216  -    * next call to unsplice any other node. Situations needing such
 217  -    * mechanics are not common but do occur in practice; for example
 218  -    * when an unbounded series of short timed calls to poll
 219  -    * repeatedly time out but never otherwise fall off the list
 220  -    * because of an untimed call to take at the front of the
 221  -    * queue. (Note that maintaining field cleanMe does not otherwise
 222  -    * much impact garbage retention even if never cleared by some
 223  -    * other call because the held node will eventually either
 224  -    * directly or indirectly lead to a self-link once off the list.)
 225  -    *
 215       * *** Overview of implementation ***
 216       *
 217  <    * We use a threshold-based approach to updates, with a target
 218  <    * slack of two. The slack value is hard-wired: a path greater
 217  >    * We use a threshold-based approach to updates, with a slack
 218  >    * threshold of two -- that is, we update head/tail when the
 219  >    * current pointer appears to be two or more steps away from the
 220  >    * first/last node. The slack value is hard-wired: a path greater
 221       * than one is naturally implemented by checking equality of
 222       * traversal pointers except when the list has only one element,
 223  <    * in which case we keep target slack at one. Avoiding tracking
 223  >    * in which case we keep slack threshold at one. Avoiding tracking
 224       * explicit counts across method calls slightly simplifies an
 225       * already-messy implementation. Using randomization would
 226       * probably work better if there were a low-quality dirt-cheap
 227       * per-thread one available, but even ThreadLocalRandom is too
 228       * heavy for these purposes.
 229       *
 230  <    * With such a small target slack value, it is rarely worthwhile
 231  <    * to augment this with path short-circuiting; i.e., unsplicing
 232  <    * nodes between head and the first unmatched node, or similarly
 233  <    * for tail, rather than advancing head or tail proper. However,
 243  <    * it is used (in awaitMatch) immediately before a waiting thread
 244  <    * starts to block, as a final bit of helping at a point when
 245  <    * contention with others is extremely unlikely (since if other
 246  <    * threads that could release it are operating, then the current
 247  <    * thread wouldn't be blocking).
 230  >    * With such a small slack threshold value, it is not worthwhile
 231  >    * to augment this with path short-circuiting (i.e., unsplicing
 232  >    * interior nodes) except in the case of cancellation/removal (see
 233  >    * below).
 234       *
 235       * We allow both the head and tail fields to be null before any
 236       * nodes are enqueued; initializing upon first append. This
 238       * efficient explicit control paths instead of letting JVMs insert
 239       * implicit NullPointerExceptions when they are null. While not
 240       * currently fully implemented, we also leave open the possibility
 241  <    * of re-nulling these fields when empty (which is is complicated
 242  <    * to arrange, for little benefit.)
 241  >    * of re-nulling these fields when empty (which is complicated to
 242  >    * arrange, for little benefit.)
 243       *
 244       * All enqueue/dequeue operations are handled by the single method
 245       * "xfer" with parameters indicating whether to act as some form
 246       * of offer, put, poll, take, or transfer (each possibly with
 247       * timeout). The relative complexity of using one monolithic
 248       * method outweighs the code bulk and maintenance problems of
 249  <    * using nine separate methods.
 249  >    * using separate methods for each case.
 250       *
 251       * Operation consists of up to three phases. The first is
 252       * implemented within method xfer, the second in tryAppend, and
 271       *
 272       * 2. Try to append a new node (method tryAppend)
 273       *
 274  <    *    Starting at current tail pointer, try to append a new node
 275  <    *    to the list (or if head was null, establish the first
 276  <    *    node). Nodes can be appended only if their predecessors are
 277  <    *    either already matched or are of the same mode. If we detect
 278  <    *    otherwise, then a new node with opposite mode must have been
 279  <    *    appended during traversal, so must restart at phase 1. The
 280  <    *    traversal and update steps are otherwise similar to phase 1:
 281  <    *    Retrying upon CAS misses and checking for staleness. In
 282  <    *    particular, if a self-link is encountered, then we can
 283  <    *    safely jump to a node on the list by continuing the
 284  <    *    traversal at current head.
 274  >    *    Starting at current tail pointer, find the actual last node
 275  >    *    and try to append a new node (or if head was null, establish
 276  >    *    the first node). Nodes can be appended only if their
 277  >    *    predecessors are either already matched or are of the same
 278  >    *    mode. If we detect otherwise, then a new node with opposite
 279  >    *    mode must have been appended during traversal, so we must
 280  >    *    restart at phase 1. The traversal and update steps are
 281  >    *    otherwise similar to phase 1: Retrying upon CAS misses and
 282  >    *    checking for staleness. In particular, if a self-link is
 283  >    *    encountered, then we can safely jump to a node on the list
 284  >    *    by continuing the traversal at current head.
 285       *
 286       *    On successful append, if the call was ASYNC, return.
 287       *
 288       * 3. Await match or cancellation (method awaitMatch)
 289       *
 290       *    Wait for another thread to match node; instead cancelling if
 291  <    *    current thread was interrupted or the wait timed out. On
 291  >    *    the current thread was interrupted or the wait timed out. On
 292       *    multiprocessors, we use front-of-queue spinning: If a node
 293       *    appears to be the first unmatched node in the queue, it
 294       *    spins a bit before blocking. In either case, before blocking
 301       *    of less-contended queues. During spins threads check their
 302       *    interrupt status and generate a thread-local random number
 303       *    to decide to occasionally perform a Thread.yield. While
 304  <    *    yield has underdefined specs, we assume that might it help,
 305  <    *    and will not hurt in limiting impact of spinning on busy
 306  <    *    systems. We also use much smaller (1/4) spins for nodes
 307  <    *    that are not known to be front but whose predecessors have
 308  <    *    not blocked -- these "chained" spins avoid artifacts of
 304  >    *    yield has underdefined specs, we assume that it might help,
 305  >    *    and will not hurt, in limiting impact of spinning on busy
 306  >    *    systems. We also use smaller (1/2) spins for nodes that are
 307  >    *    not known to be front but whose predecessors have not
 308  >    *    blocked -- these "chained" spins avoid artifacts of
 309       *    front-of-queue rules which otherwise lead to alternating
 310       *    nodes spinning vs blocking. Further, front threads that
 311       *    represent phase changes (from data to request node or vice
 312       *    versa) compared to their predecessors receive additional
 313  <    *    spins, reflecting the longer code path lengths necessary to
 314  <    *    release them under contention.
 313  >    *    chained spins, reflecting longer paths typically required to
 314  >    *    unblock threads during phase changes.
 315  >    *
 316  >    *
 317  >    * ** Unlinking removed interior nodes **
 318  >    *
 319  >    * In addition to minimizing garbage retention via self-linking
 320  >    * described above, we also unlink removed interior nodes. These
 321  >    * may arise due to timed out or interrupted waits, or calls to
 322  >    * remove(x) or Iterator.remove. Normally, given a node that was
 323  >    * at one time known to be the predecessor of some node s that is
 324  >    * to be removed, we can unsplice s by CASing the next field of
 325  >    * its predecessor if it still points to s (otherwise s must
 326  >    * already have been removed or is now offlist). But there are two
 327  >    * situations in which we cannot guarantee to make node s
 328  >    * unreachable in this way: (1) If s is the trailing node of list
 329  >    * (i.e., with null next), then it is pinned as the target node
 330  >    * for appends, so can only be removed later after other nodes are
 331  >    * appended. (2) We cannot necessarily unlink s given a
 332  >    * predecessor node that is matched (including the case of being
 333  >    * cancelled): the predecessor may already be unspliced, in which
 334  >    * case some previous reachable node may still point to s.
 335  >    * (For further explanation see Herlihy & Shavit "The Art of
 336  >    * Multiprocessor Programming" chapter 9). Although, in both
 337  >    * cases, we can rule out the need for further action if either s
 338  >    * or its predecessor are (or can be made to be) at, or fall off
 339  >    * from, the head of list.
 340  >    *
 341  >    * Without taking these into account, it would be possible for an
 342  >    * unbounded number of supposedly removed nodes to remain
 343  >    * reachable. Situations leading to such buildup are uncommon but
 344  >    * can occur in practice; for example when a series of short timed
 345  >    * calls to poll repeatedly time out but never otherwise fall off
 346  >    * the list because of an untimed call to take at the front of the
 347  >    * queue.
 348  >    *
 349  >    * When these cases arise, rather than always retraversing the
 350  >    * entire list to find an actual predecessor to unlink (which
 351  >    * won't help for case (1) anyway), we record a conservative
 352  >    * estimate of possible unsplice failures (in "sweepVotes").
 353  >    * We trigger a full sweep when the estimate exceeds a threshold
 354  >    * ("SWEEP_THRESHOLD") indicating the maximum number of estimated
 355  >    * removal failures to tolerate before sweeping through, unlinking
 356  >    * cancelled nodes that were not unlinked upon initial removal.
 357  >    * We perform sweeps by the thread hitting threshold (rather than
 358  >    * background threads or by spreading work to other threads)
 359  >    * because in the main contexts in which removal occurs, the
 360  >    * caller is already timed-out, cancelled, or performing a
 361  >    * potentially O(n) operation (e.g. remove(x)), none of which are
 362  >    * time-critical enough to warrant the overhead that alternatives
 363  >    * would impose on other threads.
 364  >    *
 365  >    * Because the sweepVotes estimate is conservative, and because
 366  >    * nodes become unlinked "naturally" as they fall off the head of
 367  >    * the queue, and because we allow votes to accumulate even while
 368  >    * sweeps are in progress, there are typically significantly fewer
 369  >    * such nodes than estimated. Choice of a threshold value
 370  >    * balances the likelihood of wasted effort and contention, versus
 371  >    * providing a worst-case bound on retention of interior nodes in
 372  >    * quiescent queues. The value defined below was chosen
 373  >    * empirically to balance these under various timeout scenarios.
 374  >    *
 375  >    * Note that we cannot self-link unlinked interior nodes during
 376  >    * sweeps. However, the associated garbage chains terminate when
 377  >    * some successor ultimately falls off the head of the list and is
 378  >    * self-linked.
 379       */
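
Illustrative sketch (not from the patch): the dual-queue matching rule described above -- an arriving operation either matches the complementary node at the front of the queue or appends a node of its own mode -- can be reduced to a deliberately coarse, lock-based toy. The class below is invented for illustration only; the real implementation is non-blocking and additionally maintains slack, self-links dequeued nodes, and unlinks removed interior nodes, all of which this sketch omits.

import java.util.ArrayDeque;

final class ToyDualQueue<E> {
    private static final class Node<E> {
        final boolean isData;  // false: a "request" node left by a waiting take
        E item;                // payload for data nodes; filled in on match
        boolean matched;
        Node(E item, boolean isData) { this.item = item; this.isData = isData; }
    }

    private final ArrayDeque<Node<E>> queue = new ArrayDeque<Node<E>>();

    public synchronized void put(E e) {
        Node<E> h = queue.peek();
        if (h != null && !h.isData) {       // front is a waiting request: match
            queue.poll();
            h.item = e;
            h.matched = true;
            notifyAll();                    // wake the taker waiting on that node
        } else {
            queue.add(new Node<E>(e, true)); // same mode as front: append data
        }
    }

    public synchronized E take() throws InterruptedException {
        Node<E> h = queue.peek();
        if (h != null && h.isData) {        // front is unmatched data: match it
            queue.poll();
            return h.item;
        }
        Node<E> s = new Node<E>(null, false); // append a request node and wait
        queue.add(s);
        while (!s.matched)
            wait();
        return s.item;
    }
}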
 380
 381      /** True if on multiprocessor */
 383          Runtime.getRuntime().availableProcessors() > 1;
 384
 385      /**
 386  <     * The number of times to spin (with on average one randomly
 387  <     * interspersed call to Thread.yield) on multiprocessor before
 388  <     * blocking when a node is apparently the first waiter in the
 389  <     * queue. See above for explanation. Must be a power of two. The
 390  <     * value is empirically derived -- it works pretty well across a
 391  <     * variety of processors, numbers of CPUs, and OSes.
 386  >     * The number of times to spin (with randomly interspersed calls
 387  >     * to Thread.yield) on multiprocessor before blocking when a node
 388  >     * is apparently the first waiter in the queue. See above for
 389  >     * explanation. Must be a power of two. The value is empirically
 390  >     * derived -- it works pretty well across a variety of processors,
 391  >     * numbers of CPUs, and OSes.
 392       */
 393      private static final int FRONT_SPINS   = 1 << 7;
 394
 395      /**
 396       * The number of times to spin before blocking when a node is
 397  <     * preceded by another node that is apparently spinning.
 397  >     * preceded by another node that is apparently spinning. Also
 398  >     * serves as an increment to FRONT_SPINS on phase changes, and as
 399  >     * base average frequency for yielding during spins. Must be a
 400  >     * power of two.
 401       */
 402  <    private static final int CHAINED_SPINS = FRONT_SPINS >>> 2;
 402  >    private static final int CHAINED_SPINS = FRONT_SPINS >>> 1;
 403  >
 404  >    /**
 405  >     * The maximum number of estimated removal failures (sweepVotes)
 406  >     * to tolerate before sweeping through the queue unlinking
 407  >     * cancelled nodes that were not unlinked upon initial
 408  >     * removal. See above for explanation. The value must be at least
 409  >     * two to avoid useless sweeps when removing trailing nodes.
 410  >     */
 411  >    static final int SWEEP_THRESHOLD = 32;
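
Illustrative sketch (not from the patch): the spin-then-park discipline these constants govern -- spin a bounded number of times while a match seems imminent, then publish the waiting thread and recheck before parking -- in minimal form. All names below are invented; chained spins, yields, timeouts, and cancellation are omitted.

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.LockSupport;

final class SpinThenPark {
    static final int FRONT_SPINS = 1 << 7;

    private final AtomicBoolean matched = new AtomicBoolean();
    private final AtomicReference<Thread> waiter = new AtomicReference<Thread>();

    void await() {
        int spins = FRONT_SPINS;
        while (!matched.get()) {
            if (spins > 0)
                --spins;                            // spin while likely at front
            else if (waiter.get() == null)
                waiter.set(Thread.currentThread()); // request unpark, then recheck
            else
                LockSupport.park(this);             // block until match() unparks
        }
    }

    void match() {
        matched.set(true);                  // publish the match first...
        LockSupport.unpark(waiter.get());   // ...then unpark (no-op if still null)
    }
}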
 412
 413      /**
 414       * Queue nodes. Uses Object, not E, for items to allow forgetting
 415       * them after use. Relies heavily on Unsafe mechanics to minimize
 416  <     * unnecessary ordering constraints: Writes that intrinsically
 417  <     * precede or follow CASes use simple relaxed forms. Other
 356  <     * cleanups use releasing/lazy writes.
 416  >     * unnecessary ordering constraints: Writes that are intrinsically
 417  >     * ordered wrt other accesses or CASes use simple relaxed forms.
 418       */
 419      static final class Node {
 420          final boolean isData;   // false if this is a request node
 428          }
 429
 430          final boolean casItem(Object cmp, Object val) {
 431  +           // assert cmp == null || cmp.getClass() != Node.class;
 432              return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
 433          }
 434
 435          /**
 436  <         * Creates a new node. Uses relaxed write because item can only
 437  <         * be seen if followed by CAS.
 436  >         * Constructs a new node. Uses relaxed write because item can
 437  >         * only be seen after publication via casNext.
 438           */
 439          Node(Object item, boolean isData) {
 440              UNSAFE.putObject(this, itemOffset, item); // relaxed write
 450          }
 451
 452          /**
 453  <         * Sets item to self (using a releasing/lazy write) and waiter
 454  <         * to null, to avoid garbage retention after extracting or
 455  <         * cancelling.
 453  >         * Sets item to self and waiter to null, to avoid garbage
 454  >         * retention after matching or cancelling. Uses relaxed writes
 455  >         * because order is already constrained in the only calling
 456  >         * contexts: item is forgotten only after volatile/atomic
 457  >         * mechanics that extract items. Similarly, clearing waiter
 458  >         * follows either CAS or return from park (if ever parked;
 459  >         * else we don't care).
 460           */
 461          final void forgetContents() {
 462  <            UNSAFE.putOrderedObject(this, itemOffset, this);
 463  <            UNSAFE.putOrderedObject(this, waiterOffset, null);
 462  >            UNSAFE.putObject(this, itemOffset, this);
 463  >            UNSAFE.putObject(this, waiterOffset, null);
 464          }
 465
 466          /**
 469           */
 470          final boolean isMatched() {
 471              Object x = item;
 472  <            return x == this || (x != null) != isData;
 472  >            return (x == this) || ((x == null) == isData);
 473  >        }
 474  >
 475  >        /**
 476  >         * Returns true if this is an unmatched request node.
 477  >         */
 478  >        final boolean isUnmatchedRequest() {
 479  >            return !isData && item == null;
 480          }
 481
 482          /**
 494           * Tries to artificially match a data node -- used by remove.
 495           */
 496          final boolean tryMatchData() {
 497  +           // assert isData;
 498              Object x = item;
 499              if (x != null && x != this && casItem(x, null)) {
 500                  LockSupport.unpark(waiter);
 503              return false;
 504          }
 505
 432  -        // Unsafe mechanics
 433  -        private static final sun.misc.Unsafe UNSAFE = getUnsafe();
 434  -        private static final long nextOffset =
 435  -            objectFieldOffset(UNSAFE, "next", Node.class);
 436  -        private static final long itemOffset =
 437  -            objectFieldOffset(UNSAFE, "item", Node.class);
 438  -        private static final long waiterOffset =
 439  -            objectFieldOffset(UNSAFE, "waiter", Node.class);
 440  -
 506          private static final long serialVersionUID = -3375979862319811754L;
 507  +
 508  +        // Unsafe mechanics
 509  +        private static final sun.misc.Unsafe UNSAFE;
 510  +        private static final long itemOffset;
 511  +        private static final long nextOffset;
 512  +        private static final long waiterOffset;
 513  +        static {
 514  +            try {
 515  +                UNSAFE = getUnsafe();
 516  +                Class<?> k = Node.class;
 517  +                itemOffset = UNSAFE.objectFieldOffset
 518  +                    (k.getDeclaredField("item"));
 519  +                nextOffset = UNSAFE.objectFieldOffset
 520  +                    (k.getDeclaredField("next"));
 521  +                waiterOffset = UNSAFE.objectFieldOffset
 522  +                    (k.getDeclaredField("waiter"));
 523  +            } catch (Exception e) {
 524  +                throw new Error(e);
 525  +            }
 526  +        }
 527      }
 528
 529      /** head of the queue; null until first enqueue */
 530  <    private transient volatile Node head;
 446  <
 447  <    /** predecessor of dangling unspliceable node */
 448  <    private transient volatile Node cleanMe; // decl here to reduce contention
 530  >    transient volatile Node head;
 531
 532      /** tail of the queue; null until first append */
 533      private transient volatile Node tail;
 534
 535  +    /** The number of apparent failures to unsplice removed nodes */
 536  +    private transient volatile int sweepVotes;
 537  +
 538      // CAS methods for fields
 539      private boolean casTail(Node cmp, Node val) {
 540          return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val);
 544          return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val);
 545      }
 546
 547  <    private boolean casCleanMe(Node cmp, Node val) {
 548  <        return UNSAFE.compareAndSwapObject(this, cleanMeOffset, cmp, val);
 547  >    private boolean casSweepVotes(int cmp, int val) {
 548  >        return UNSAFE.compareAndSwapInt(this, sweepVotesOffset, cmp, val);
 549      }
 550
 551      /*
 552  <     * Possible values for "how" argument in xfer method. Beware that
 468  <     * the order of assigned numerical values matters.
 552  >     * Possible values for "how" argument in xfer method.
 553       */
 554  <    private static final int NOW     = 0; // for untimed poll, tryTransfer
 555  <    private static final int ASYNC   = 1; // for offer, put, add
 556  <    private static final int SYNC    = 2; // for transfer, take
 557  <    private static final int TIMEOUT = 3; // for timed poll, tryTransfer
 554  >    private static final int NOW   = 0; // for untimed poll, tryTransfer
 555  >    private static final int ASYNC = 1; // for offer, put, add
 556  >    private static final int SYNC  = 2; // for transfer, take
 557  >    private static final int TIMED = 3; // for timed poll, tryTransfer
 558  >
 559  >    @SuppressWarnings("unchecked")
 560  >    static <E> E cast(Object item) {
 561  >        // assert item == null || item.getClass() != Node.class;
 562  >        return (E) item;
 563  >    }
|
|
565 |
|
/** |
566 |
|
* Implements all queuing methods. See above for explanation. |
567 |
|
* |
568 |
|
* @param e the item or null for take |
569 |
|
* @param haveData true if this is a put, else a take |
570 |
< |
* @param how NOW, ASYNC, SYNC, or TIMEOUT |
571 |
< |
* @param nanos timeout in nanosecs, used only if mode is TIMEOUT |
570 |
> |
* @param how NOW, ASYNC, SYNC, or TIMED |
571 |
> |
* @param nanos timeout in nanosecs, used only if mode is TIMED |
572 |
|
* @return an item if matched, else e |
573 |
|
* @throws NullPointerException if haveData mode but e is null |
574 |
|
*/ |
575 |
< |
private Object xfer(Object e, boolean haveData, int how, long nanos) { |
575 |
> |
private E xfer(E e, boolean haveData, int how, long nanos) { |
576 |
|
if (haveData && (e == null)) |
577 |
|
throw new NullPointerException(); |
578 |
|
Node s = null; // the node to append, if needed |
579 |
|
|
580 |
< |
retry: for (;;) { // restart on append race |
580 |
> |
retry: |
581 |
> |
for (;;) { // restart on append race |
582 |
|
|
583 |
|
for (Node h = head, p = h; p != null;) { // find & match first node |
584 |
|
boolean isData = p.isData; |
587 |
|
if (isData == haveData) // can't match |
588 |
|
break; |
589 |
|
if (p.casItem(item, e)) { // match |
590 |
< |
Thread w = p.waiter; |
591 |
< |
while (p != h) { // update head |
592 |
< |
Node n = p.next; // by 2 unless singleton |
502 |
< |
if (n != null) |
503 |
< |
p = n; |
504 |
< |
if (head == h && casHead(h, p)) { |
590 |
> |
for (Node q = p; q != h;) { |
591 |
> |
Node n = q.next; // update by 2 unless singleton |
592 |
> |
if (head == h && casHead(h, n == null ? q : n)) { |
593 |
|
h.forgetNext(); |
594 |
|
break; |
595 |
|
} // advance and retry |
596 |
|
if ((h = head) == null || |
597 |
< |
(p = h.next) == null || !p.isMatched()) |
597 |
> |
(q = h.next) == null || !q.isMatched()) |
598 |
|
break; // unless slack < 2 |
599 |
|
} |
600 |
< |
LockSupport.unpark(w); |
601 |
< |
return item; |
600 |
> |
LockSupport.unpark(p.waiter); |
601 |
> |
return LinkedTransferQueue.<E>cast(item); |
602 |
|
} |
603 |
|
} |
604 |
|
Node n = p.next; |
605 |
|
p = (p != n) ? n : (h = head); // Use head if p offlist |
606 |
|
} |
607 |
|
|
608 |
< |
if (how >= ASYNC) { // No matches available |
608 |
> |
if (how != NOW) { // No matches available |
609 |
|
if (s == null) |
610 |
|
s = new Node(e, haveData); |
611 |
|
Node pred = tryAppend(s, haveData); |
612 |
|
if (pred == null) |
613 |
|
continue retry; // lost race vs opposite mode |
614 |
< |
if (how >= SYNC) |
615 |
< |
return awaitMatch(pred, s, e, how, nanos); |
614 |
> |
if (how != ASYNC) |
615 |
> |
return awaitMatch(s, pred, e, (how == TIMED), nanos); |
616 |
|
} |
617 |
|
return e; // not waiting |
618 |
|
} |
628 |
|
* predecessor |
629 |
|
*/ |
630 |
|
private Node tryAppend(Node s, boolean haveData) { |
631 |
< |
for (Node t = tail, p = t;;) { // move p to last node and append |
631 |
> |
for (Node t = tail, p = t;;) { // move p to last node and append |
632 |
|
Node n, u; // temps for reads of next & tail |
633 |
|
if (p == null && (p = head) == null) { |
634 |
|
if (casHead(null, s)) |
656 |
|
/** |
657 |
|
* Spins/yields/blocks until node s is matched or caller gives up. |
658 |
|
* |
571 |
– |
* @param pred the predecessor of s, or s or null if none |
659 |
|
* @param s the waiting node |
660 |
+ |
* @param pred the predecessor of s, or s itself if it has no |
661 |
+ |
* predecessor, or null if unknown (the null case does not occur |
662 |
+ |
* in any current calls but may in possible future extensions) |
663 |
|
* @param e the comparison value for checking match |
664 |
< |
* @param how either SYNC or TIMEOUT |
665 |
< |
* @param nanos timeout value |
664 |
> |
* @param timed if true, wait only until timeout elapses |
665 |
> |
* @param nanos timeout in nanosecs, used only if timed is true |
666 |
|
* @return matched item, or e if unmatched on interrupt or timeout |
667 |
|
*/ |
668 |
< |
private Object awaitMatch(Node pred, Node s, Object e, |
669 |
< |
int how, long nanos) { |
580 |
< |
long lastTime = (how == TIMEOUT) ? System.nanoTime() : 0L; |
668 |
> |
private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) { |
669 |
> |
long lastTime = timed ? System.nanoTime() : 0L; |
670 |
|
Thread w = Thread.currentThread(); |
671 |
|
int spins = -1; // initialized after first item and cancel checks |
672 |
|
ThreadLocalRandom randomYields = null; // bound if needed |
674 |
|
for (;;) { |
675 |
|
Object item = s.item; |
676 |
|
if (item != e) { // matched |
677 |
+ |
// assert item != s; |
678 |
|
s.forgetContents(); // avoid garbage |
679 |
< |
return item; |
679 |
> |
return LinkedTransferQueue.<E>cast(item); |
680 |
|
} |
681 |
< |
if ((w.isInterrupted() || (how == TIMEOUT && nanos <= 0)) && |
682 |
< |
s.casItem(e, s)) { // cancel |
681 |
> |
if ((w.isInterrupted() || (timed && nanos <= 0)) && |
682 |
> |
s.casItem(e, s)) { // cancel |
683 |
|
unsplice(pred, s); |
684 |
|
return e; |
685 |
|
} |
688 |
|
if ((spins = spinsFor(pred, s.isData)) > 0) |
689 |
|
randomYields = ThreadLocalRandom.current(); |
690 |
|
} |
691 |
< |
else if (spins > 0) { // spin, occasionally yield |
602 |
< |
if (randomYields.nextInt(FRONT_SPINS) == 0) |
603 |
< |
Thread.yield(); |
691 |
> |
else if (spins > 0) { // spin |
692 |
|
--spins; |
693 |
+ |
if (randomYields.nextInt(CHAINED_SPINS) == 0) |
694 |
+ |
Thread.yield(); // occasionally yield |
695 |
|
} |
696 |
|
else if (s.waiter == null) { |
697 |
< |
shortenHeadPath(); // reduce slack before blocking |
608 |
< |
s.waiter = w; // request unpark |
697 |
> |
s.waiter = w; // request unpark then recheck |
698 |
|
} |
699 |
< |
else if (how == TIMEOUT) { |
699 |
> |
else if (timed) { |
700 |
|
long now = System.nanoTime(); |
701 |
|
if ((nanos -= now - lastTime) > 0) |
702 |
|
LockSupport.parkNanos(this, nanos); |
704 |
|
} |
705 |
|
else { |
706 |
|
LockSupport.park(this); |
618 |
– |
spins = -1; // spin if front upon wakeup |
707 |
|
} |
708 |
|
} |
709 |
|
} |
714 |
|
*/ |
715 |
|
private static int spinsFor(Node pred, boolean haveData) { |
716 |
|
if (MP && pred != null) { |
717 |
< |
boolean predData = pred.isData; |
718 |
< |
if (predData != haveData) // front and phase change |
719 |
< |
return FRONT_SPINS + (FRONT_SPINS >>> 1); |
632 |
< |
if (predData != (pred.item != null)) // probably at front |
717 |
> |
if (pred.isData != haveData) // phase change |
718 |
> |
return FRONT_SPINS + CHAINED_SPINS; |
719 |
> |
if (pred.isMatched()) // probably at front |
720 |
|
return FRONT_SPINS; |
721 |
|
if (pred.waiter == null) // pred apparently spinning |
722 |
|
return CHAINED_SPINS; |
724 |
|
return 0; |
725 |
|
} |
726 |
|
|
727 |
+ |
/* -------------- Traversal methods -------------- */ |
728 |
+ |
|
729 |
|
/** |
730 |
< |
* Tries (once) to unsplice nodes between head and first unmatched |
731 |
< |
* or trailing node; failing on contention. |
732 |
< |
*/ |
733 |
< |
private void shortenHeadPath() { |
734 |
< |
Node h, hn, p, q; |
735 |
< |
if ((p = h = head) != null && h.isMatched() && |
736 |
< |
(q = hn = h.next) != null) { |
648 |
< |
Node n; |
649 |
< |
while ((n = q.next) != q) { |
650 |
< |
if (n == null || !q.isMatched()) { |
651 |
< |
if (hn != q && h.next == hn) |
652 |
< |
h.casNext(hn, q); |
653 |
< |
break; |
654 |
< |
} |
655 |
< |
p = q; |
656 |
< |
q = n; |
657 |
< |
} |
658 |
< |
} |
730 |
> |
* Returns the successor of p, or the head node if p.next has been |
731 |
> |
* linked to self, which will only be true if traversing with a |
732 |
> |
* stale pointer that is now off the list. |
733 |
> |
*/ |
734 |
> |
final Node succ(Node p) { |
735 |
> |
Node next = p.next; |
736 |
> |
return (p == next) ? head : next; |
737 |
|
} |
738 |
|
|
661 |
– |
/* -------------- Traversal methods -------------- */ |
662 |
– |
|
739 |
|
/** |
740 |
|
* Returns the first unmatched node of the given mode, or null if |
741 |
|
* none. Used by methods isEmpty, hasWaitingConsumer. |
742 |
|
*/ |
743 |
< |
private Node firstOfMode(boolean data) { |
744 |
< |
for (Node p = head; p != null; ) { |
743 |
> |
private Node firstOfMode(boolean isData) { |
744 |
> |
for (Node p = head; p != null; p = succ(p)) { |
745 |
|
if (!p.isMatched()) |
746 |
< |
return (p.isData == data) ? p : null; |
671 |
< |
Node n = p.next; |
672 |
< |
p = (n != p) ? n : head; |
746 |
> |
return (p.isData == isData) ? p : null; |
747 |
|
} |
748 |
|
return null; |
749 |
|
} |
750 |
|
|
751 |
|
/** |
752 |
|
* Returns the item in the first unmatched node with isData; or |
753 |
< |
* null if none. Used by peek. |
753 |
> |
* null if none. Used by peek. |
754 |
|
*/ |
755 |
< |
private Object firstDataItem() { |
756 |
< |
for (Node p = head; p != null; ) { |
683 |
< |
boolean isData = p.isData; |
755 |
> |
private E firstDataItem() { |
756 |
> |
for (Node p = head; p != null; p = succ(p)) { |
757 |
|
Object item = p.item; |
758 |
< |
if (item != p && (item != null) == isData) |
759 |
< |
return isData ? item : null; |
760 |
< |
Node n = p.next; |
761 |
< |
p = (n != p) ? n : head; |
758 |
> |
if (p.isData) { |
759 |
> |
if (item != null && item != p) |
760 |
> |
return LinkedTransferQueue.<E>cast(item); |
761 |
> |
} |
762 |
> |
else if (item == null) |
763 |
> |
return null; |
764 |
|
} |
765 |
|
return null; |
766 |
|
} |
791 |
|
|
792 |
|
final class Itr implements Iterator<E> { |
793 |
|
private Node nextNode; // next node to return item for |
794 |
< |
private Object nextItem; // the corresponding item |
794 |
> |
private E nextItem; // the corresponding item |
795 |
|
private Node lastRet; // last returned node, to support remove |
796 |
+ |
private Node lastPred; // predecessor to unlink lastRet |
797 |
|
|
798 |
|
/** |
799 |
|
* Moves to next node after prev, or first node if prev null. |
800 |
|
*/ |
801 |
|
private void advance(Node prev) { |
802 |
< |
lastRet = prev; |
803 |
< |
Node p; |
804 |
< |
if (prev == null || (p = prev.next) == prev) |
805 |
< |
p = head; |
806 |
< |
while (p != null) { |
807 |
< |
Object item = p.item; |
808 |
< |
if (p.isData) { |
809 |
< |
if (item != null && item != p) { |
810 |
< |
nextItem = item; |
811 |
< |
nextNode = p; |
802 |
> |
/* |
803 |
> |
* To track and avoid buildup of deleted nodes in the face |
804 |
> |
* of calls to both Queue.remove and Itr.remove, we must |
805 |
> |
* include variants of unsplice and sweep upon each |
806 |
> |
* advance: Upon Itr.remove, we may need to catch up links |
807 |
> |
* from lastPred, and upon other removes, we might need to |
808 |
> |
* skip ahead from stale nodes and unsplice deleted ones |
809 |
> |
* found while advancing. |
810 |
> |
*/ |
811 |
> |
|
812 |
> |
Node r, b; // reset lastPred upon possible deletion of lastRet |
813 |
> |
if ((r = lastRet) != null && !r.isMatched()) |
814 |
> |
lastPred = r; // next lastPred is old lastRet |
815 |
> |
else if ((b = lastPred) == null || b.isMatched()) |
816 |
> |
lastPred = null; // at start of list |
817 |
> |
else { |
818 |
> |
Node s, n; // help with removal of lastPred.next |
819 |
> |
while ((s = b.next) != null && |
820 |
> |
s != b && s.isMatched() && |
821 |
> |
(n = s.next) != null && n != s) |
822 |
> |
b.casNext(s, n); |
823 |
> |
} |
824 |
> |
|
825 |
> |
this.lastRet = prev; |
826 |
> |
|
827 |
> |
for (Node p = prev, s, n;;) { |
828 |
> |
s = (p == null) ? head : p.next; |
829 |
> |
if (s == null) |
830 |
> |
break; |
831 |
> |
else if (s == p) { |
832 |
> |
p = null; |
833 |
> |
continue; |
834 |
> |
} |
835 |
> |
Object item = s.item; |
836 |
> |
if (s.isData) { |
837 |
> |
if (item != null && item != s) { |
838 |
> |
nextItem = LinkedTransferQueue.<E>cast(item); |
839 |
> |
nextNode = s; |
840 |
|
return; |
841 |
|
} |
842 |
|
} |
843 |
|
else if (item == null) |
844 |
|
break; |
845 |
< |
Node n = p.next; |
846 |
< |
p = (n != p) ? n : head; |
845 |
> |
// assert s.isMatched(); |
846 |
> |
if (p == null) |
847 |
> |
p = s; |
848 |
> |
else if ((n = s.next) == null) |
849 |
> |
break; |
850 |
> |
else if (s == n) |
851 |
> |
p = null; |
852 |
> |
else |
853 |
> |
p.casNext(s, n); |
854 |
|
} |
855 |
|
nextNode = null; |
856 |
+ |
nextItem = null; |
857 |
|
} |
858 |
|
|
859 |
|
Itr() { |
867 |
|
public final E next() { |
868 |
|
Node p = nextNode; |
869 |
|
if (p == null) throw new NoSuchElementException(); |
870 |
< |
Object e = nextItem; |
870 |
> |
E e = nextItem; |
871 |
|
advance(p); |
872 |
< |
return (E) e; |
872 |
> |
return e; |
873 |
|
} |
874 |
|
|
875 |
|
public final void remove() { |
876 |
< |
Node p = lastRet; |
877 |
< |
if (p == null) throw new IllegalStateException(); |
878 |
< |
lastRet = null; |
879 |
< |
findAndRemoveNode(p); |
876 |
> |
final Node lastRet = this.lastRet; |
877 |
> |
if (lastRet == null) |
878 |
> |
throw new IllegalStateException(); |
879 |
> |
this.lastRet = null; |
880 |
> |
if (lastRet.tryMatchData()) |
881 |
> |
unsplice(lastPred, lastRet); |
882 |
|
} |
883 |
|
} |
884 |
|
|
888 |
|
* Unsplices (now or later) the given deleted/cancelled node with |
889 |
|
* the given predecessor. |
890 |
|
* |
891 |
< |
* @param pred predecessor of node to be unspliced |
891 |
> |
* @param pred a node that was at one time known to be the |
892 |
> |
* predecessor of s, or null or s itself if s is/was at head |
893 |
|
* @param s the node to be unspliced |
894 |
|
*/ |
895 |
< |
private void unsplice(Node pred, Node s) { |
896 |
< |
s.forgetContents(); // clear unneeded fields |
895 |
> |
final void unsplice(Node pred, Node s) { |
896 |
> |
s.forgetContents(); // forget unneeded fields |
897 |
|
/* |
898 |
< |
* At any given time, exactly one node on list cannot be |
899 |
< |
* unlinked -- the last inserted node. To accommodate this, if |
900 |
< |
* we cannot unlink s, we save its predecessor as "cleanMe", |
901 |
< |
* processing the previously saved version first. Because only |
902 |
< |
* one node in the list can have a null next, at least one of |
788 |
< |
* node s or the node previously saved can always be |
789 |
< |
* processed, so this always terminates. |
898 |
> |
* See above for rationale. Briefly: if pred still points to |
899 |
> |
* s, try to unlink s. If s cannot be unlinked, because it is |
900 |
> |
* trailing node or pred might be unlinked, and neither pred |
901 |
> |
* nor s are head or offlist, add to sweepVotes, and if enough |
902 |
> |
* votes have accumulated, sweep. |
903 |
|
*/ |
904 |
< |
if (pred != null && pred != s) { |
905 |
< |
while (pred.next == s) { |
906 |
< |
Node oldpred = (cleanMe == null) ? null : reclean(); |
907 |
< |
Node n = s.next; |
908 |
< |
if (n != null) { |
909 |
< |
if (n != s) |
910 |
< |
pred.casNext(s, n); |
911 |
< |
break; |
904 |
> |
if (pred != null && pred != s && pred.next == s) { |
905 |
> |
Node n = s.next; |
906 |
> |
if (n == null || |
907 |
> |
(n != s && pred.casNext(s, n) && pred.isMatched())) { |
908 |
> |
for (;;) { // check if at, or could be, head |
909 |
> |
Node h = head; |
910 |
> |
if (h == pred || h == s || h == null) |
911 |
> |
return; // at head or list empty |
912 |
> |
if (!h.isMatched()) |
913 |
> |
break; |
914 |
> |
Node hn = h.next; |
915 |
> |
if (hn == null) |
916 |
> |
return; // now empty |
917 |
> |
if (hn != h && casHead(h, hn)) |
918 |
> |
h.forgetNext(); // advance head |
919 |
> |
} |
920 |
> |
if (pred.next != pred && s.next != s) { // recheck if offlist |
921 |
> |
for (;;) { // sweep now if enough votes |
922 |
> |
int v = sweepVotes; |
923 |
> |
if (v < SWEEP_THRESHOLD) { |
924 |
> |
if (casSweepVotes(v, v + 1)) |
925 |
> |
break; |
926 |
> |
} |
927 |
> |
else if (casSweepVotes(v, 0)) { |
928 |
> |
sweep(); |
929 |
> |
break; |
930 |
> |
} |
931 |
> |
} |
932 |
|
} |
800 |
– |
if (oldpred == pred || // Already saved |
801 |
– |
(oldpred == null && casCleanMe(null, pred))) |
802 |
– |
break; // Postpone cleaning |
933 |
|
} |
934 |
|
} |
935 |
|
} |
936 |
|
|
937 |
|
/** |
938 |
< |
* Tries to unsplice the deleted/cancelled node held in cleanMe |
939 |
< |
* that was previously uncleanable because it was at tail. |
810 |
< |
* |
811 |
< |
* @return current cleanMe node (or null) |
938 |
> |
* Unlinks matched (typically cancelled) nodes encountered in a |
939 |
> |
* traversal from head. |
940 |
|
*/ |
941 |
< |
private Node reclean() { |
942 |
< |
/* |
943 |
< |
* cleanMe is, or at one time was, predecessor of a cancelled |
944 |
< |
* node s that was the tail so could not be unspliced. If it |
945 |
< |
* is no longer the tail, try to unsplice if necessary and |
946 |
< |
* make cleanMe slot available. This differs from similar |
819 |
< |
* code in unsplice() because we must check that pred still |
820 |
< |
* points to a matched node that can be unspliced -- if not, |
821 |
< |
* we can (must) clear cleanMe without unsplicing. This can |
822 |
< |
* loop only due to contention. |
823 |
< |
*/ |
824 |
< |
Node pred; |
825 |
< |
while ((pred = cleanMe) != null) { |
826 |
< |
Node s = pred.next; |
827 |
< |
Node n; |
828 |
< |
if (s == null || s == pred || !s.isMatched()) |
829 |
< |
casCleanMe(pred, null); // already gone |
830 |
< |
else if ((n = s.next) != null) { |
831 |
< |
if (n != s) |
832 |
< |
pred.casNext(s, n); |
833 |
< |
casCleanMe(pred, null); |
834 |
< |
} |
835 |
< |
else |
941 |
> |
private void sweep() { |
942 |
> |
for (Node p = head, s, n; p != null && (s = p.next) != null; ) { |
943 |
> |
if (!s.isMatched()) |
944 |
> |
// Unmatched nodes are never self-linked |
945 |
> |
p = s; |
946 |
> |
else if ((n = s.next) == null) // trailing node is pinned |
947 |
|
break; |
948 |
< |
} |
949 |
< |
return pred; |
950 |
< |
} |
951 |
< |
|
952 |
< |
/** |
842 |
< |
* Main implementation of Iterator.remove(). Find |
843 |
< |
* and unsplice the given node. |
844 |
< |
*/ |
845 |
< |
final void findAndRemoveNode(Node s) { |
846 |
< |
if (s.tryMatchData()) { |
847 |
< |
Node pred = null; |
848 |
< |
Node p = head; |
849 |
< |
while (p != null) { |
850 |
< |
if (p == s) { |
851 |
< |
unsplice(pred, p); |
852 |
< |
break; |
853 |
< |
} |
854 |
< |
if (!p.isData && !p.isMatched()) |
855 |
< |
break; |
856 |
< |
pred = p; |
857 |
< |
if ((p = p.next) == pred) { // stale |
858 |
< |
pred = null; |
859 |
< |
p = head; |
860 |
< |
} |
861 |
< |
} |
948 |
> |
else if (s == n) // stale |
949 |
> |
// No need to also check for p == s, since that implies s == n |
950 |
> |
p = head; |
951 |
> |
else |
952 |
> |
p.casNext(s, n); |
953 |
|
} |
954 |
|
} |
955 |
|
|
958 |
|
*/ |
959 |
|
private boolean findAndRemove(Object e) { |
960 |
|
if (e != null) { |
961 |
< |
Node pred = null; |
871 |
< |
Node p = head; |
872 |
< |
while (p != null) { |
961 |
> |
for (Node pred = null, p = head; p != null; ) { |
962 |
|
Object item = p.item; |
963 |
|
if (p.isData) { |
964 |
|
if (item != null && item != p && e.equals(item) && |
970 |
|
else if (item == null) |
971 |
|
break; |
972 |
|
pred = p; |
973 |
< |
if ((p = p.next) == pred) { |
973 |
> |
if ((p = p.next) == pred) { // stale |
974 |
|
pred = null; |
975 |
|
p = head; |
976 |
|
} |
1016 |
|
* return {@code false}. |
1017 |
|
* |
1018 |
|
* @return {@code true} (as specified by |
1019 |
< |
* {@link BlockingQueue#offer(Object,long,TimeUnit) BlockingQueue.offer}) |
1019 |
> |
* {@link java.util.concurrent.BlockingQueue#offer(Object,long,TimeUnit) |
1020 |
> |
* BlockingQueue.offer}) |
1021 |
|
* @throws NullPointerException if the specified element is null |
1022 |
|
*/ |
1023 |
|
public boolean offer(E e, long timeout, TimeUnit unit) { |
1029 |
|
* Inserts the specified element at the tail of this queue. |
1030 |
|
* As the queue is unbounded, this method will never return {@code false}. |
1031 |
|
* |
1032 |
< |
* @return {@code true} (as specified by |
943 |
< |
* {@link BlockingQueue#offer(Object) BlockingQueue.offer}) |
1032 |
> |
* @return {@code true} (as specified by {@link Queue#offer}) |
1033 |
|
* @throws NullPointerException if the specified element is null |
1034 |
|
*/ |
1035 |
|
public boolean offer(E e) { |
1098 |
|
*/ |
1099 |
|
public boolean tryTransfer(E e, long timeout, TimeUnit unit) |
1100 |
|
throws InterruptedException { |
1101 |
< |
if (xfer(e, true, TIMEOUT, unit.toNanos(timeout)) == null) |
1101 |
> |
if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null) |
1102 |
|
return true; |
1103 |
|
if (!Thread.interrupted()) |
1104 |
|
return false; |
1106 |
|
} |
1107 |
|
|
1108 |
|
public E take() throws InterruptedException { |
1109 |
< |
Object e = xfer(null, false, SYNC, 0); |
1109 |
> |
E e = xfer(null, false, SYNC, 0); |
1110 |
|
if (e != null) |
1111 |
< |
return (E)e; |
1111 |
> |
return e; |
1112 |
|
Thread.interrupted(); |
1113 |
|
throw new InterruptedException(); |
1114 |
|
} |
1115 |
|
|
1116 |
|
public E poll(long timeout, TimeUnit unit) throws InterruptedException { |
1117 |
< |
Object e = xfer(null, false, TIMEOUT, unit.toNanos(timeout)); |
1117 |
> |
E e = xfer(null, false, TIMED, unit.toNanos(timeout)); |
1118 |
|
if (e != null || !Thread.interrupted()) |
1119 |
< |
return (E)e; |
1119 |
> |
return e; |
1120 |
|
throw new InterruptedException(); |
1121 |
|
} |
1122 |
|
|
1123 |
|
public E poll() { |
1124 |
< |
return (E)xfer(null, false, NOW, 0); |
1124 |
> |
return xfer(null, false, NOW, 0); |
1125 |
|
} |
1126 |
|
|
1127 |
|
/** |
1134 |
|
if (c == this) |
1135 |
|
throw new IllegalArgumentException(); |
1136 |
|
int n = 0; |
1137 |
< |
E e; |
1049 |
< |
while ( (e = poll()) != null) { |
1137 |
> |
for (E e; (e = poll()) != null;) { |
1138 |
|
c.add(e); |
1139 |
|
++n; |
1140 |
|
} |
1151 |
|
if (c == this) |
1152 |
|
throw new IllegalArgumentException(); |
1153 |
|
int n = 0; |
1154 |
< |
E e; |
1067 |
< |
while (n < maxElements && (e = poll()) != null) { |
1154 |
> |
for (E e; n < maxElements && (e = poll()) != null;) { |
1155 |
|
c.add(e); |
1156 |
|
++n; |
1157 |
|
} |
1159 |
|
} |
1160 |
|
|
1161 |
|
/** |
1162 |
< |
* Returns an iterator over the elements in this queue in proper |
1163 |
< |
* sequence, from head to tail. |
1162 |
> |
* Returns an iterator over the elements in this queue in proper sequence. |
1163 |
> |
* The elements will be returned in order from first (head) to last (tail). |
1164 |
|
* |
1165 |
|
* <p>The returned iterator is a "weakly consistent" iterator that |
1166 |
< |
* will never throw |
1167 |
< |
* {@link ConcurrentModificationException ConcurrentModificationException}, |
1168 |
< |
* and guarantees to traverse elements as they existed upon |
1169 |
< |
* construction of the iterator, and may (but is not guaranteed |
1170 |
< |
* to) reflect any modifications subsequent to construction. |
1166 |
> |
* will never throw {@link java.util.ConcurrentModificationException |
1167 |
> |
* ConcurrentModificationException}, and guarantees to traverse |
1168 |
> |
* elements as they existed upon construction of the iterator, and |
1169 |
> |
* may (but is not guaranteed to) reflect any modifications |
1170 |
> |
* subsequent to construction. |
1171 |
|
* |
1172 |
|
* @return an iterator over the elements in this queue in proper sequence |
1173 |
|
*/ |
1176 |
|
} |
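
Usage sketch (not from the patch): the weak-consistency guarantee documented above in action -- iteration never throws ConcurrentModificationException, and modifications made after the iterator is created may or may not be reflected:

import java.util.Iterator;
import java.util.concurrent.LinkedTransferQueue;

public class WeaklyConsistentDemo {
    public static void main(String[] args) {
        LinkedTransferQueue<Integer> q = new LinkedTransferQueue<Integer>();
        for (int i = 0; i < 3; i++)
            q.offer(i);

        Iterator<Integer> it = q.iterator();
        System.out.println(it.next());      // 0
        q.offer(99);                        // modify during iteration
        q.remove(Integer.valueOf(1));
        while (it.hasNext())                // no ConcurrentModificationException;
            System.out.println(it.next());  // remaining output may vary
    }
}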
1177
1178      public E peek() {
1179  <        return (E) firstDataItem();
1179  >        return firstDataItem();
1180      }
1181
1182      /**
1185       * @return {@code true} if this queue contains no elements
1186       */
1187      public boolean isEmpty() {
1188  <        return firstOfMode(true) == null;
1188  >        for (Node p = head; p != null; p = succ(p)) {
1189  >            if (!p.isMatched())
1190  >                return !p.isData;
1191  >        }
1192  >        return true;
1193      }
1194
1195      public boolean hasWaitingConsumer() {
1232      }
1233
1234      /**
1235  +     * Returns {@code true} if this queue contains the specified element.
1236  +     * More formally, returns {@code true} if and only if this queue contains
1237  +     * at least one element {@code e} such that {@code o.equals(e)}.
1238  +     *
1239  +     * @param o object to be checked for containment in this queue
1240  +     * @return {@code true} if this queue contains the specified element
1241  +     */
1242  +    public boolean contains(Object o) {
1243  +        if (o == null) return false;
1244  +        for (Node p = head; p != null; p = succ(p)) {
1245  +            Object item = p.item;
1246  +            if (p.isData) {
1247  +                if (item != null && item != p && o.equals(item))
1248  +                    return true;
1249  +            }
1250  +            else if (item == null)
1251  +                break;
1252  +        }
1253  +        return false;
1254  +    }
1255  +
1256  +    /**
1257       * Always returns {@code Integer.MAX_VALUE} because a
1258       * {@code LinkedTransferQueue} is not capacity constrained.
1259       *
1260       * @return {@code Integer.MAX_VALUE} (as specified by
1261  <     *         {@link BlockingQueue#remainingCapacity()})
1261  >     *         {@link java.util.concurrent.BlockingQueue#remainingCapacity()
1262  >     *         BlockingQueue.remainingCapacity})
1263       */
1264      public int remainingCapacity() {
1265          return Integer.MAX_VALUE;
1291          throws java.io.IOException, ClassNotFoundException {
1292          s.defaultReadObject();
1293          for (;;) {
1294  <            @SuppressWarnings("unchecked") E item = (E) s.readObject();
1294  >            @SuppressWarnings("unchecked")
1295  >            E item = (E) s.readObject();
1296              if (item == null)
1297                  break;
1298              else
1300          }
1301      }
1302
1188  -
1303      // Unsafe mechanics
1304
1305  <    private static final sun.misc.Unsafe UNSAFE = getUnsafe();
1306  <    private static final long headOffset =
1307  <        objectFieldOffset(UNSAFE, "head", LinkedTransferQueue.class);
1308  <    private static final long tailOffset =
1309  <        objectFieldOffset(UNSAFE, "tail", LinkedTransferQueue.class);
1196  <    private static final long cleanMeOffset =
1197  <        objectFieldOffset(UNSAFE, "cleanMe", LinkedTransferQueue.class);
1198  <
1199  <    static long objectFieldOffset(sun.misc.Unsafe UNSAFE,
1200  <                                  String field, Class<?> klazz) {
1305  >    private static final sun.misc.Unsafe UNSAFE;
1306  >    private static final long headOffset;
1307  >    private static final long tailOffset;
1308  >    private static final long sweepVotesOffset;
1309  >    static {
1310          try {
1311  <            return UNSAFE.objectFieldOffset(klazz.getDeclaredField(field));
1312  <        } catch (NoSuchFieldException e) {
1313  <            // Convert Exception to corresponding Error
1314  <            NoSuchFieldError error = new NoSuchFieldError(field);
1315  <            error.initCause(e);
1316  <            throw error;
1311  >            UNSAFE = getUnsafe();
1312  >            Class<?> k = LinkedTransferQueue.class;
1313  >            headOffset = UNSAFE.objectFieldOffset
1314  >                (k.getDeclaredField("head"));
1315  >            tailOffset = UNSAFE.objectFieldOffset
1316  >                (k.getDeclaredField("tail"));
1317  >            sweepVotesOffset = UNSAFE.objectFieldOffset
1318  >                (k.getDeclaredField("sweepVotes"));
1319  >        } catch (Exception e) {
1320  >            throw new Error(e);
1321          }
1322      }
1323
1324  <    private static sun.misc.Unsafe getUnsafe() {
1324  >    /**
1325  >     * Returns a sun.misc.Unsafe.  Suitable for use in a 3rd party package.
1326  >     * Replace with a simple call to Unsafe.getUnsafe when integrating
1327  >     * into a jdk.
1328  >     *
1329  >     * @return a sun.misc.Unsafe
1330  >     */
1331  >    static sun.misc.Unsafe getUnsafe() {
1332          try {
1333              return sun.misc.Unsafe.getUnsafe();
1334  <        } catch (SecurityException se) {
1335  <            try {
1336  <                return java.security.AccessController.doPrivileged
1337  <                    (new java.security
1338  <                     .PrivilegedExceptionAction<sun.misc.Unsafe>() {
1339  <                        public sun.misc.Unsafe run() throws Exception {
1340  <                            java.lang.reflect.Field f = sun.misc
1341  <                                .Unsafe.class.getDeclaredField("theUnsafe");
1342  <                            f.setAccessible(true);
1343  <                            return (sun.misc.Unsafe) f.get(null);
1344  <                        }});
1345  <            } catch (java.security.PrivilegedActionException e) {
1346  <                throw new RuntimeException("Could not initialize intrinsics",
1347  <                                           e.getCause());
1348  <            }
1334  >        } catch (SecurityException tryReflectionInstead) {}
1335  >        try {
1336  >            return java.security.AccessController.doPrivileged
1337  >                (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
1338  >                    public sun.misc.Unsafe run() throws Exception {
1339  >                        Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
1340  >                        for (java.lang.reflect.Field f : k.getDeclaredFields()) {
1341  >                            f.setAccessible(true);
1342  >                            Object x = f.get(null);
1343  >                            if (k.isInstance(x))
1344  >                                return k.cast(x);
1345  >                        }
1346  >                        throw new NoSuchFieldError("the Unsafe");
1347  >                    }});
1348  >        } catch (java.security.PrivilegedActionException e) {
1349  >            throw new RuntimeException("Could not initialize intrinsics",
1350  >                                       e.getCause());
1351          }
1352      }
1231  -
1353  }