ViewVC Help
View File | Revision Log | Show Annotations | Download File | Root Listing
root/jsr166/jsr166/src/main/java/util/concurrent/LinkedTransferQueue.java
Revision: 1.163
Committed: Sat Jan 28 18:29:49 2023 UTC (15 months, 2 weeks ago) by jsr166
Branch: MAIN
CVS Tags: HEAD
Changes since 1.162: +1 -1 lines
Log Message:
typo

File Contents

# Content
1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/publicdomain/zero/1.0/
5 */
6
7 package java.util.concurrent;
8
9 import java.lang.invoke.MethodHandles;
10 import java.lang.invoke.VarHandle;
11 import java.util.AbstractQueue;
12 import java.util.Arrays;
13 import java.util.Collection;
14 import java.util.Iterator;
15 import java.util.NoSuchElementException;
16 import java.util.Objects;
17 import java.util.Queue;
18 import java.util.Spliterator;
19 import java.util.Spliterators;
20 import java.util.concurrent.locks.LockSupport;
21 import java.util.function.Consumer;
22 import java.util.function.Predicate;
23
24 /**
25 * An unbounded {@link TransferQueue} based on linked nodes.
26 * This queue orders elements FIFO (first-in-first-out) with respect
27 * to any given producer. The <em>head</em> of the queue is that
28 * element that has been on the queue the longest time for some
29 * producer. The <em>tail</em> of the queue is that element that has
30 * been on the queue the shortest time for some producer.
31 *
32 * <p>Beware that, unlike in most collections, the {@code size} method
33 * is <em>NOT</em> a constant-time operation. Because of the
34 * asynchronous nature of these queues, determining the current number
35 * of elements requires a traversal of the elements, and so may report
36 * inaccurate results if this collection is modified during traversal.
37 *
38 * <p>Bulk operations that add, remove, or examine multiple elements,
39 * such as {@link #addAll}, {@link #removeIf} or {@link #forEach},
40 * are <em>not</em> guaranteed to be performed atomically.
41 * For example, a {@code forEach} traversal concurrent with an {@code
42 * addAll} operation might observe only some of the added elements.
43 *
44 * <p>This class and its iterator implement all of the <em>optional</em>
45 * methods of the {@link Collection} and {@link Iterator} interfaces.
46 *
47 * <p>Memory consistency effects: As with other concurrent
48 * collections, actions in a thread prior to placing an object into a
49 * {@code LinkedTransferQueue}
50 * <a href="package-summary.html#MemoryVisibility"><i>happen-before</i></a>
51 * actions subsequent to the access or removal of that element from
52 * the {@code LinkedTransferQueue} in another thread.
53 *
54 * <p>This class is a member of the
55 * <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework">
56 * Java Collections Framework</a>.
57 *
58 * @since 1.7
59 * @author Doug Lea
60 * @param <E> the type of elements held in this queue
61 */
62 public class LinkedTransferQueue<E> extends AbstractQueue<E>
63 implements TransferQueue<E>, java.io.Serializable {
64 private static final long serialVersionUID = -3223113410248163686L;
65
66 /*
67 * *** Overview of Dual Queues with Slack ***
68 *
69 * Dual Queues, introduced by Scherer and Scott
70 * (http://www.cs.rochester.edu/~scott/papers/2004_DISC_dual_DS.pdf)
71 * are (linked) queues in which nodes may represent either data or
72 * requests. When a thread tries to enqueue a data node, but
73 * encounters a request node, it instead "matches" and removes it;
74 * and vice versa for enqueuing requests. Blocking Dual Queues
75 * arrange that threads enqueuing unmatched requests block until
76 * other threads provide the match. Dual Synchronous Queues (see
77 * Scherer, Lea, & Scott
78 * http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf)
79 * additionally arrange that threads enqueuing unmatched data also
80 * block. Dual Transfer Queues support all of these modes, as
81 * dictated by callers.
82 *
83 * A FIFO dual queue may be implemented using a variation of the
84 * Michael & Scott (M&S) lock-free queue algorithm
85 * (http://www.cs.rochester.edu/~scott/papers/1996_PODC_queues.pdf).
86 * It maintains two pointer fields, "head", pointing to a
87 * (matched) node that in turn points to the first actual
88 * (unmatched) queue node (or null if empty); and "tail" that
89 * points to the last node on the queue (or again null if
90 * empty). For example, here is a possible queue with four data
91 * elements:
92 *
93 * head tail
94 * | |
95 * v v
96 * M -> U -> U -> U -> U
97 *
98 * The M&S queue algorithm is known to be prone to scalability and
99 * overhead limitations when maintaining (via CAS) these head and
100 * tail pointers. This has led to the development of
101 * contention-reducing variants such as elimination arrays (see
102 * Moir et al http://portal.acm.org/citation.cfm?id=1074013) and
103 * optimistic back pointers (see Ladan-Mozes & Shavit
104 * http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf).
105 * However, the nature of dual queues enables a simpler tactic for
106 * improving M&S-style implementations when dual-ness is needed.
107 *
108 * In a dual queue, each node must atomically maintain its match
109 * status. While there are other possible variants, we implement
110 * this here as: for a data-mode node, matching entails CASing an
111 * "item" field from a non-null data value to null upon match, and
112 * vice-versa for request nodes, CASing from null to a data
113 * value. (Note that the linearization properties of this style of
114 * queue are easy to verify -- elements are made available by
115 * linking, and unavailable by matching.) Compared to plain M&S
116 * queues, this property of dual queues requires one additional
117 * successful atomic operation per enq/deq pair. But it also
118 * enables lower cost variants of queue maintenance mechanics. (A
119 * variation of this idea applies even for non-dual queues that
120 * support deletion of interior elements, such as
121 * j.u.c.ConcurrentLinkedQueue.)
122 *
123 * Once a node is matched, its match status can never again
124 * change. We may thus arrange that the linked list of them
125 * contain a prefix of zero or more matched nodes, followed by a
126 * suffix of zero or more unmatched nodes. (Note that we allow
127 * both the prefix and suffix to be zero length, which in turn
128 * means that we do not use a dummy header.) If we were not
129 * concerned with either time or space efficiency, we could
130 * correctly perform enqueue and dequeue operations by traversing
131 * from a pointer to the initial node; CASing the item of the
132 * first unmatched node on match and CASing the next field of the
133 * trailing node on appends. While this would be a terrible idea
134 * in itself, it does have the benefit of not requiring ANY atomic
135 * updates on head/tail fields.
136 *
137 * We introduce here an approach that lies between the extremes of
138 * never versus always updating queue (head and tail) pointers.
139 * This offers a tradeoff between sometimes requiring extra
140 * traversal steps to locate the first and/or last unmatched
141 * nodes, versus the reduced overhead and contention of fewer
142 * updates to queue pointers. For example, a possible snapshot of
143 * a queue is:
144 *
145 * head tail
146 * | |
147 * v v
148 * M -> M -> U -> U -> U -> U
149 *
150 * The best value for this "slack" (the targeted maximum distance
151 * between the value of "head" and the first unmatched node, and
152 * similarly for "tail") is an empirical matter. We have found
153 * that using very small constants in the range of 1-3 work best
154 * over a range of platforms. Larger values introduce increasing
155 * costs of cache misses and risks of long traversal chains, while
156 * smaller values increase CAS contention and overhead.
157 *
158 * Dual queues with slack differ from plain M&S dual queues by
159 * virtue of only sometimes updating head or tail pointers when
160 * matching, appending, or even traversing nodes; in order to
161 * maintain a targeted slack. The idea of "sometimes" may be
162 * operationalized in several ways. The simplest is to use a
163 * per-operation counter incremented on each traversal step, and
164 * to try (via CAS) to update the associated queue pointer
165 * whenever the count exceeds a threshold. Another, that requires
166 * more overhead, is to use random number generators to update
167 * with a given probability per traversal step.
168 *
169 * In any strategy along these lines, because CASes updating
170 * fields may fail, the actual slack may exceed targeted slack.
171 * However, they may be retried at any time to maintain targets.
172 * Even when using very small slack values, this approach works
173 * well for dual queues because it allows all operations up to the
174 * point of matching or appending an item (hence potentially
175 * allowing progress by another thread) to be read-only, thus not
176 * introducing any further contention. As described below, we
177 * implement this by performing slack maintenance retries only
178 * after these points.
179 *
180 * As an accompaniment to such techniques, traversal overhead can
181 * be further reduced without increasing contention of head
182 * pointer updates: Threads may sometimes shortcut the "next" link
183 * path from the current "head" node to be closer to the currently
184 * known first unmatched node, and similarly for tail. Again, this
185 * may be triggered with using thresholds or randomization.
186 *
187 * These ideas must be further extended to avoid unbounded amounts
188 * of costly-to-reclaim garbage caused by the sequential "next"
189 * links of nodes starting at old forgotten head nodes: As first
190 * described in detail by Boehm
191 * (http://portal.acm.org/citation.cfm?doid=503272.503282), if a GC
192 * delays noticing that any arbitrarily old node has become
193 * garbage, all newer dead nodes will also be unreclaimed.
194 * (Similar issues arise in non-GC environments.) To cope with
195 * this in our implementation, upon CASing to advance the head
196 * pointer, we set the "next" link of the previous head to point
197 * only to itself; thus limiting the length of chains of dead nodes.
198 * (We also take similar care to wipe out possibly garbage
199 * retaining values held in other Node fields.) However, doing so
200 * adds some further complexity to traversal: If any "next"
201 * pointer links to itself, it indicates that the current thread
202 * has lagged behind a head-update, and so the traversal must
203 * continue from the "head". Traversals trying to find the
204 * current tail starting from "tail" may also encounter
205 * self-links, in which case they also continue at "head".
206 *
207 * It is tempting in slack-based scheme to not even use CAS for
208 * updates (similarly to Ladan-Mozes & Shavit). However, this
209 * cannot be done for head updates under the above link-forgetting
210 * mechanics because an update may leave head at a detached node.
211 * And while direct writes are possible for tail updates, they
212 * increase the risk of long retraversals, and hence long garbage
213 * chains, which can be much more costly than is worthwhile
214 * considering that the cost difference of performing a CAS vs
215 * write is smaller when they are not triggered on each operation
216 * (especially considering that writes and CASes equally require
217 * additional GC bookkeeping ("write barriers") that are sometimes
218 * more costly than the writes themselves because of contention).
219 *
220 * *** Overview of implementation ***
221 *
222 * We use a threshold-based approach to updates, with a slack
223 * threshold of two -- that is, we update head/tail when the
224 * current pointer appears to be two or more steps away from the
225 * first/last node. The slack value is hard-wired: a path greater
226 * than one is naturally implemented by checking equality of
227 * traversal pointers except when the list has only one element,
228 * in which case we keep slack threshold at one. Avoiding tracking
229 * explicit counts across method calls slightly simplifies an
230 * already-messy implementation. Using randomization would
231 * probably work better if there were a low-quality dirt-cheap
232 * per-thread one available, but even ThreadLocalRandom is too
233 * heavy for these purposes.
234 *
235 * With such a small slack threshold value, it is not worthwhile
236 * to augment this with path short-circuiting (i.e., unsplicing
237 * interior nodes) except in the case of cancellation/removal (see
238 * below).
239 *
240 * All enqueue/dequeue operations are handled by the single method
241 * "xfer" with parameters indicating whether to act as some form
242 * of offer, put, poll, take, or transfer (each possibly with
243 * timeout). The relative complexity of using one monolithic
244 * method outweighs the code bulk and maintenance problems of
245 * using separate methods for each case.
246 *
247 * Operation consists of up to two phases. The first is implemented
248 * in method xfer, the second in method awaitMatch.
249 *
250 * 1. Traverse until matching or appending (method xfer)
251 *
252 * Conceptually, we simply traverse all nodes starting from head.
253 * If we encounter an unmatched node of opposite mode, we match
254 * it and return, also updating head (by at least 2 hops) to
255 * one past the matched node (or the node itself if it's the
256 * pinned trailing node). Traversals also check for the
257 * possibility of falling off-list, in which case they restart.
258 *
259 * If the trailing node of the list is reached, a match is not
260 * possible. If this call was untimed poll or tryTransfer
261 * (argument "how" is NOW), return empty-handed immediately.
262 * Else a new node is CAS-appended. On successful append, if
263 * this call was ASYNC (e.g. offer), an element was
264 * successfully added to the end of the queue and we return.
265 *
266 * Of course, this naive traversal is O(n) when no match is
267 * possible. We optimize the traversal by maintaining a tail
268 * pointer, which is expected to be "near" the end of the list.
269 * It is only safe to fast-forward to tail (in the presence of
270 * arbitrary concurrent changes) if it is pointing to a node of
271 * the same mode, even if it is dead (in this case no preceding
272 * node could still be matchable by this traversal). If we
273 * need to restart due to falling off-list, we can again
274 * fast-forward to tail, but only if it has changed since the
275 * last traversal (else we might loop forever). If tail cannot
276 * be used, traversal starts at head (but in this case we
277 * expect to be able to match near head). As with head, we
278 * CAS-advance the tail pointer by at least two hops.
279 *
280 * 2. Await match or cancellation (method awaitMatch)
281 *
282 * Wait for another thread to match node; instead cancelling if
283 * the current thread was interrupted or the wait timed out. To
284 * improve performance in common single-source / single-sink
285 * usages when there are more tasks than cores, an initial
286 * Thread.yield is tried when there is apparently only one
287 * waiter. In other cases, waiters may help with some
288 * bookkeeping, then park/unpark.
289 *
290 * ** Unlinking removed interior nodes **
291 *
292 * In addition to minimizing garbage retention via self-linking
293 * described above, we also unlink removed interior nodes. These
294 * may arise due to timed out or interrupted waits, or calls to
295 * remove(x) or Iterator.remove. Normally, given a node that was
296 * at one time known to be the predecessor of some node s that is
297 * to be removed, we can unsplice s by CASing the next field of
298 * its predecessor if it still points to s (otherwise s must
299 * already have been removed or is now offlist). But there are two
300 * situations in which we cannot guarantee to make node s
301 * unreachable in this way: (1) If s is the trailing node of list
302 * (i.e., with null next), then it is pinned as the target node
303 * for appends, so can only be removed later after other nodes are
304 * appended. (2) We cannot necessarily unlink s given a
305 * predecessor node that is matched (including the case of being
306 * cancelled): the predecessor may already be unspliced, in which
307 * case some previous reachable node may still point to s.
308 * (For further explanation see Herlihy & Shavit "The Art of
309 * Multiprocessor Programming" chapter 9). Although, in both
310 * cases, we can rule out the need for further action if either s
311 * or its predecessor are (or can be made to be) at, or fall off
312 * from, the head of list.
313 *
314 * Without taking these into account, it would be possible for an
315 * unbounded number of supposedly removed nodes to remain reachable.
316 * Situations leading to such buildup are uncommon but can occur
317 * in practice; for example when a series of short timed calls to
318 * poll repeatedly time out at the trailing node but otherwise
319 * never fall off the list because of an untimed call to take() at
320 * the front of the queue.
321 *
322 * When these cases arise, rather than always retraversing the
323 * entire list to find an actual predecessor to unlink (which
324 * won't help for case (1) anyway), we record the need to sweep the
325 * next time any thread would otherwise block in awaitMatch. Also,
326 * because traversal operations on the linked list of nodes are a
327 * natural opportunity to sweep dead nodes, we generally do so,
328 * including all the operations that might remove elements as they
329 * traverse, such as removeIf and Iterator.remove. This largely
330 * eliminates long chains of dead interior nodes, except from
331 * cancelled or timed out blocking operations.
332 *
333 * Note that we cannot self-link unlinked interior nodes during
334 * sweeps. However, the associated garbage chains terminate when
335 * some successor ultimately falls off the head of the list and is
336 * self-linked.
337 */
338
339 /**
340 * The number of nanoseconds for which it is faster to spin
341 * rather than to use timed park. A rough estimate suffices.
342 * Using a power of two minus one simplifies some comparisons.
343 */
344 static final long SPIN_FOR_TIMEOUT_THRESHOLD = 1023L; // (1 << 10) - 1
345
346 /**
347 * The maximum number of estimated removal failures (sweepVotes)
348 * to tolerate before sweeping through the queue unlinking
349 * cancelled nodes that were not unlinked upon initial
350 * removal. See above for explanation. The value must be at least
351 * two to avoid useless sweeps when removing trailing nodes.
352 */
353 static final int SWEEP_THRESHOLD = 32;
// NOTE(review): the "sweepVotes" counter named in the javadoc does not
// appear in the visible source; sweep scheduling here is driven by the
// boolean field needSweep (see awaitMatch). Confirm whether this constant
// is still consulted elsewhere in the file, and update the javadoc.
354
355 /**
356 * Queue nodes. Uses Object, not E, for items to allow forgetting
357 * them after use. Writes that are intrinsically ordered wrt
358 * other accesses or CASes use simple relaxed forms.
359 */
360 static final class Node implements ForkJoinPool.ManagedBlocker {
361 final boolean isData; // false if this is a request node
362 volatile Object item; // initially non-null if isData; CASed to match
363 volatile Node next;
364 volatile Thread waiter; // null when not waiting for a match
365
366 /**
367 * Constructs a data node holding item if item is non-null,
368 * else a request node. Uses relaxed write because item can
369 * only be seen after piggy-backing publication via CAS.
370 */
371 Node(Object item) {
372 ITEM.set(this, item);
373 isData = (item != null);
374 }
375
376 /** Constructs a (matched data) dummy node. */
377 Node() {
378 isData = true;
379 }
380
381 final boolean casNext(Node cmp, Node val) {
382 // assert val != null;
383 return NEXT.compareAndSet(this, cmp, val);
384 }
385
386 final boolean casItem(Object cmp, Object val) {
387 // assert isData == (cmp != null);
388 // assert isData == (val == null);
389 // assert !(cmp instanceof Node);
390 return ITEM.compareAndSet(this, cmp, val);
391 }
392
393 /**
394 * Links node to itself to avoid garbage retention. Called
395 * only after CASing head field, so uses relaxed write.
396 */
397 final void selfLink() {
398 // assert isMatched();
399 NEXT.setRelease(this, this);
400 }
401
402 final void appendRelaxed(Node next) {
403 // assert next != null;
404 // assert this.next == null;
405 NEXT.setOpaque(this, next);
406 }
407
408 /**
409 * Returns true if this node has been matched, including the
410 * case of artificial matches due to cancellation.
411 */
412 final boolean isMatched() {
413 return isData == (item == null);
414 }
415
416 /** Tries to CAS-match this node; if successful, wakes waiter. */
417 final boolean tryMatch(Object cmp, Object val) {
418 if (casItem(cmp, val)) {
419 LockSupport.unpark(waiter);
420 return true;
421 }
422 return false;
423 }
424
425 /**
426 * Returns true if a node with the given mode cannot be
427 * appended to this node because this node is unmatched and
428 * has opposite data mode.
429 */
430 final boolean cannotPrecede(boolean haveData) {
431 boolean d = isData;
432 return d != haveData && d != (item == null);
433 }
434
/** ManagedBlocker protocol: done blocking once matched or this thread is interrupted. */
435 public final boolean isReleasable() {
436 return (isData == (item == null)) ||
437 Thread.currentThread().isInterrupted();
438 }
439
/** ManagedBlocker protocol: parks until isReleasable holds. */
440 public final boolean block() {
441 while (!isReleasable()) LockSupport.park();
442 return true;
443 }
444
// NOTE(review): Node does not implement Serializable in the visible
// source, so this id appears vestigial -- confirm before removing.
445 private static final long serialVersionUID = -3375979862319811754L;
446 }
447
448 /**
449 * A node from which the first live (non-matched) node (if any)
450 * can be reached in O(1) time.
451 * Invariants:
452 * - all live nodes are reachable from head via .next
453 * - head != null
454 * - (tmp = head).next != tmp || tmp != head
455 *   (i.e., if a node read as head appears self-linked, head must
456 *   have since been advanced by another thread; re-read head)
455 * Non-invariants:
456 * - head may or may not be live
457 * - it is permitted for tail to lag behind head, that is, for tail
458 * to not be reachable from head!
459 */
460 transient volatile Node head;
461
462 /**
463 * A node from which the last node on list (that is, the unique
464 * node with node.next == null) can be reached in O(1) time.
465 * Invariants:
466 * - the last node is always reachable from tail via .next
467 * - tail != null
468 * Non-invariants:
469 * - tail may or may not be live
470 * - it is permitted for tail to lag behind head, that is, for tail
471 * to not be reachable from head!
472 * - tail.next may or may not be self-linked.
473 *   (the trailing node -- the unique one with null next -- is the
474 *   append target; see xfer)
473 */
474 private transient volatile Node tail;
475
476 /** Whether a sweep (unlinking cancelled nodes that failed to unsplice)
 * is needed; checked and cleared by the next thread that would otherwise
 * block in awaitMatch. (Comment fixed: field is a boolean flag, not the
 * former "sweepVotes" failure count.) */
477 private transient volatile boolean needSweep;
478
/** Atomically moves tail from cmp to val; returns false if tail changed. */
479 private boolean casTail(Node cmp, Node val) {
480 // assert cmp != null;
481 // assert val != null;
482 return TAIL.compareAndSet(this, cmp, val);
483 }
484
/** Atomically moves head from cmp to val; returns false if head changed. */
485 private boolean casHead(Node cmp, Node val) {
486 return HEAD.compareAndSet(this, cmp, val);
487 }
488
489 /**
490 * Tries to CAS pred.next (or head, if pred is null) from c to p.
491 * Caller must ensure that we're not unlinking the trailing node.
492 */
493 private boolean tryCasSuccessor(Node pred, Node c, Node p) {
494 // assert p != null;
495 // assert c.isData != (c.item != null);
496 // assert c != p;
497 if (pred != null)
498 return pred.casNext(c, p);
499 if (casHead(c, p)) {
// c was the head and is now off-list: self-link it so that lagging
// traversals still holding c will detect this and restart from head.
500 c.selfLink();
501 return true;
502 }
503 return false;
504 }
505
506 /**
507 * Collapses dead (matched) nodes between pred and q.
508 * @param pred the last known live node, or null if none
509 * @param c the first dead node
510 * @param p the last dead node
511 * @param q p.next: the next live node, or null if at end
512 * @return pred if pred still alive and CAS succeeded; else p
513 */
514 private Node skipDeadNodes(Node pred, Node c, Node p, Node q) {
515 // assert pred != c;
516 // assert p != q;
517 // assert c.isMatched();
518 // assert p.isMatched();
519 if (q == null) {
520 // Never unlink trailing node.
// p is the pinned append target: if it is also the first dead node
// there is nothing safe to collapse; otherwise collapse only up to p.
521 if (c == p) return pred;
522 q = p;
523 }
524 return (tryCasSuccessor(pred, c, q)
525 && (pred == null || !pred.isMatched()))
526 ? pred : p;
527 }
528
529 /**
530 * Collapses dead (matched) nodes from h (which was once head) to p.
531 * Caller ensures all nodes from h up to and including p are dead.
532 */
533 private void skipDeadNodesNearHead(Node h, Node p) {
534 // assert h != null;
535 // assert h != p;
536 // assert p.isMatched();
537 for (;;) {
538 final Node q;
// Advance p past any further dead nodes, stopping at the trailing
// node, the first live node, or on detecting a self-link.
539 if ((q = p.next) == null) break;
540 else if (!q.isMatched()) { p = q; break; }
541 else if (p == (p = q)) return; // self-linked: fell off-list; give up
542 }
543 if (casHead(h, p))
544 h.selfLink();
545 }
546
547 /* Possible values for "how" argument in xfer method. */
/* NOW and ASYNC never block; SYNC and TIMED hand off to awaitMatch
 * (TIMED waits at most nanos). */
548
549 private static final int NOW = 0; // for untimed poll, tryTransfer
550 private static final int ASYNC = 1; // for offer, put, add
551 private static final int SYNC = 2; // for transfer, take
552 private static final int TIMED = 3; // for timed poll, tryTransfer
553
554 /**
555 * Implements all queuing methods. See above for explanation.
556 *
557 * @param e the item or null for take
558 * @param haveData true if this is a put, else a take
559 * @param how NOW, ASYNC, SYNC, or TIMED
560 * @param nanos timeout in nanosecs, used only if mode is TIMED
561 * @return an item if matched, else e
562 * @throws NullPointerException if haveData mode but e is null
563 */
564 @SuppressWarnings("unchecked")
565 private E xfer(E e, boolean haveData, int how, long nanos) {
566 if (haveData && (e == null))
567 throw new NullPointerException();
568
569 restart: for (Node s = null, t = null, h = null;;) {
// Fast-forward to tail only if it changed since our last read AND has
// our own mode (then no preceding node could match us -- see overview);
// otherwise traverse from head.
570 for (Node p = (t != (t = tail) && t.isData == haveData) ? t
571 : (h = head);; ) {
572 final Node q; final Object item;
// p is an unmatched node of the opposite mode: try to match it.
573 if (p.isData != haveData
574 && haveData == ((item = p.item) == null)) {
575 if (h == null) h = head;
576 if (p.tryMatch(item, e)) {
// Matched beyond head: collapse the dead prefix to maintain slack.
577 if (h != p) skipDeadNodesNearHead(h, p);
578 return (E) item;
579 }
580 }
581 if ((q = p.next) == null) {
// Reached the trailing node: no match is possible on this pass.
582 if (how == NOW) return e;
583 if (s == null) s = new Node(e);
584 if (!p.casNext(null, s)) continue;
// Appended two or more hops past t: update tail (slack maintenance).
585 if (p != t) casTail(t, s);
586 if (how == ASYNC) return e;
587 return awaitMatch(s, p, e, (how == TIMED), nanos);
588 }
589 if (p == (p = q)) continue restart; // self-linked: fell off-list
590 }
591 }
592 }
593
594 /**
595 * Possibly blocks until node s is matched or caller gives up.
596 *
597 * @param s the waiting node
598 * @param pred the predecessor of s, or null if unknown (the null
599 * case does not occur in any current calls but may in possible
600 * future extensions)
601 * @param e the comparison value for checking match
602 * @param timed if true, wait only until timeout elapses
603 * @param nanos timeout in nanosecs, used only if timed is true
604 * @return matched item, or e if unmatched on interrupt or timeout
605 */
606 @SuppressWarnings("unchecked")
607 private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) {
608 final boolean isData = s.isData;
609 final long deadline = timed ? System.nanoTime() + nanos : 0L;
610 final Thread w = Thread.currentThread();
611 int stat = -1; // -1: may yield, +1: park, else 0
612 Object item;
613 while ((item = s.item) == e) {
614 if (needSweep) // help clean
615 sweep();
616 else if ((timed && nanos <= 0L) || w.isInterrupted()) {
// Cancel by "matching" s artificially: a request node's item is set
// to the node itself; a data node's item is nulled out.
617 if (s.casItem(e, (e == null) ? s : null)) {
618 unsplice(pred, s); // cancelled
619 return e;
620 }
621 }
622 else if (stat <= 0) {
623 if (pred != null && pred.next == s) {
624 if (stat < 0 &&
625 (pred.isData != isData || pred.isMatched())) {
626 stat = 0; // yield once if first
627 Thread.yield();
628 }
629 else {
630 stat = 1;
631 s.waiter = w; // enable unpark
632 }
633 } // else signal in progress
634 }
635 else if ((item = s.item) != e)
636 break; // recheck
637 else if (!timed) {
// Untimed wait: block through Node's ManagedBlocker implementation
// (isReleasable/block), with this queue as the reported blocker.
638 LockSupport.setCurrentBlocker(this);
639 try {
640 ForkJoinPool.managedBlock(s);
641 } catch (InterruptedException cannotHappen) { }
642 LockSupport.setCurrentBlocker(null);
643 }
644 else {
645 nanos = deadline - System.nanoTime();
// For very short remaining timeouts, spin (loop) instead of parking.
646 if (nanos > SPIN_FOR_TIMEOUT_THRESHOLD)
647 LockSupport.parkNanos(this, nanos);
648 }
649 }
650 if (stat == 1)
651 WAITER.set(s, null); // forget waiter to avoid retention
652 if (!isData)
653 ITEM.set(s, s); // self-link to avoid garbage
654 return (E) item;
655 }
656
657 /* -------------- Traversal methods -------------- */
658
659 /**
660 * Returns the first unmatched data node, or null if none.
661 * Callers must recheck if the returned node is unmatched
662 * before using.
663 */
664 final Node firstDataNode() {
665 Node first = null;
666 restartFromHead: for (;;) {
667 Node h = head, p = h;
668 while (p != null) {
669 if (p.item != null) {
670 if (p.isData) {
671 first = p;
672 break;
673 }
674 }
// item == null with !isData is an unmatched request node; since all
// unmatched nodes share one mode, there can be no data node: stop.
675 else if (!p.isData)
676 break;
677 final Node q;
678 if ((q = p.next) == null)
679 break;
680 if (p == (p = q))
681 continue restartFromHead; // self-linked: restart traversal
682 }
// Opportunistically collapse the dead prefix traversed above.
683 if (p != h && casHead(h, p))
684 h.selfLink();
685 return first;
686 }
687 }
688
689 /**
690 * Traverses and counts unmatched nodes of the given mode.
691 * Used by methods size and getWaitingConsumerCount.
692 */
693 private int countOfMode(boolean data) {
694 restartFromHead: for (;;) {
695 int count = 0;
696 for (Node p = head; p != null;) {
697 if (!p.isMatched()) {
// All unmatched nodes share one mode, so an unmatched node of the
// opposite mode means none of the requested mode exist.
698 if (p.isData != data)
699 return 0;
700 if (++count == Integer.MAX_VALUE)
701 break; // @see Collection.size()
702 }
703 if (p == (p = p.next))
704 continue restartFromHead; // self-linked: restart, discarding count
705 }
706 return count;
707 }
708 }
709
/**
 * Returns a "[e1, e2, ...]" rendering of the current data elements;
 * best-effort under concurrent modification (single traversal).
 */
710 public String toString() {
711 String[] a = null;
712 restartFromHead: for (;;) {
713 int charLength = 0;
714 int size = 0;
715 for (Node p = head; p != null;) {
716 Object item = p.item;
717 if (p.isData) {
718 if (item != null) {
719 if (a == null)
720 a = new String[4];
721 else if (size == a.length)
722 a = Arrays.copyOf(a, 2 * size);
723 String s = item.toString();
724 a[size++] = s;
725 charLength += s.length();
726 }
// Unmatched request node: no data nodes can exist; stop traversal.
727 } else if (item == null)
728 break;
729 if (p == (p = p.next))
730 continue restartFromHead; // self-linked: restart, discarding progress
731 }
732
733 if (size == 0)
734 return "[]";
735
736 return Helpers.toString(a, size, charLength);
737 }
738 }
739
/**
 * Copies current data items into a (when non-null and large enough) or
 * into a freshly allocated array; shared helper for both toArray variants.
 */
740 private Object[] toArrayInternal(Object[] a) {
741 Object[] x = a;
742 restartFromHead: for (;;) {
743 int size = 0;
744 for (Node p = head; p != null;) {
745 Object item = p.item;
746 if (p.isData) {
747 if (item != null) {
748 if (x == null)
749 x = new Object[4];
750 else if (size == x.length)
751 x = Arrays.copyOf(x, 2 * (size + 4));
752 x[size++] = item;
753 }
// Unmatched request node: no data nodes can exist; stop traversal.
754 } else if (item == null)
755 break;
756 if (p == (p = p.next))
757 continue restartFromHead; // self-linked: restart, discarding progress
758 }
759 if (x == null)
760 return new Object[0];
761 else if (a != null && size <= a.length) {
762 if (a != x)
763 System.arraycopy(x, 0, a, 0, size);
// Per the toArray(T[]) contract, null the slot just past the last element.
764 if (size < a.length)
765 a[size] = null;
766 return a;
767 }
768 return (size == x.length) ? x : Arrays.copyOf(x, size);
769 }
770 }
771
772 /**
773 * Returns an array containing all of the elements in this queue, in
774 * proper sequence.
775 *
776 * <p>The returned array will be "safe" in that no references to it are
777 * maintained by this queue. (In other words, this method must allocate
778 * a new array). The caller is thus free to modify the returned array.
779 *
780 * <p>This method acts as bridge between array-based and collection-based
781 * APIs.
782 *
783 * @return an array containing all of the elements in this queue
784 */
785 public Object[] toArray() {
786 return toArrayInternal(null); // null arg forces a fresh allocation
787 }
788
789 /**
790 * Returns an array containing all of the elements in this queue, in
791 * proper sequence; the runtime type of the returned array is that of
792 * the specified array. If the queue fits in the specified array, it
793 * is returned therein. Otherwise, a new array is allocated with the
794 * runtime type of the specified array and the size of this queue.
795 *
796 * <p>If this queue fits in the specified array with room to spare
797 * (i.e., the array has more elements than this queue), the element in
798 * the array immediately following the end of the queue is set to
799 * {@code null}.
800 *
801 * <p>Like the {@link #toArray()} method, this method acts as bridge between
802 * array-based and collection-based APIs. Further, this method allows
803 * precise control over the runtime type of the output array, and may,
804 * under certain circumstances, be used to save allocation costs.
805 *
806 * <p>Suppose {@code x} is a queue known to contain only strings.
807 * The following code can be used to dump the queue into a newly
808 * allocated array of {@code String}:
809 *
810 * <pre> {@code String[] y = x.toArray(new String[0]);}</pre>
811 *
812 * Note that {@code toArray(new Object[0])} is identical in function to
813 * {@code toArray()}.
814 *
815 * @param a the array into which the elements of the queue are to
816 * be stored, if it is big enough; otherwise, a new array of the
817 * same runtime type is allocated for this purpose
818 * @return an array containing all of the elements in this queue
819 * @throws ArrayStoreException if the runtime type of the specified array
820 * is not a supertype of the runtime type of every element in
821 * this queue
822 * @throws NullPointerException if the specified array is null
823 */
824 @SuppressWarnings("unchecked")
825 public <T> T[] toArray(T[] a) {
826 Objects.requireNonNull(a); // NPE per spec before any traversal
827 return (T[]) toArrayInternal(a);
828 }
829
    /**
     * Weakly-consistent iterator.
     *
     * Lazily updated ancestor is expected to be amortized O(1) remove(),
     * but O(n) in the worst case, when lastRet is concurrently deleted.
     */
    final class Itr implements Iterator<E> {
        private Node nextNode;   // next node to return item for
        private E nextItem;      // the corresponding item
        private Node lastRet;    // last returned node, to support remove
        private Node ancestor;   // Helps unlink lastRet on remove()

        /**
         * Moves to next node after pred, or first node if pred null.
         */
        @SuppressWarnings("unchecked")
        private void advance(Node pred) {
            for (Node p = (pred == null) ? head : pred.next, c = p;
                 p != null; ) {
                final Object item;
                if ((item = p.item) != null && p.isData) {
                    // Live data node: cache it for next(), and collapse
                    // the run of dead nodes [c..p) we skipped to reach it.
                    nextNode = p;
                    nextItem = (E) item;
                    if (c != p)
                        tryCasSuccessor(pred, c, p);
                    return;
                }
                else if (!p.isData && item == null)
                    // Unmatched request node: no data at or after here.
                    break;
                if (c != p && !tryCasSuccessor(pred, c, c = p)) {
                    // Collapse CAS lost a race; resume collapsing from p.
                    pred = p;
                    c = p = p.next;
                }
                else if (p == (p = p.next)) {
                    // p self-linked (off-list): restart from head.
                    pred = null;
                    c = p = head;
                }
            }
            // Traversal exhausted: mark iteration complete.
            nextItem = null;
            nextNode = null;
        }

        Itr() {
            advance(null);
        }

        public final boolean hasNext() {
            return nextNode != null;
        }

        public final E next() {
            final Node p;
            if ((p = nextNode) == null) throw new NoSuchElementException();
            E e = nextItem;
            advance(lastRet = p);
            return e;
        }

        public void forEachRemaining(Consumer<? super E> action) {
            Objects.requireNonNull(action);
            Node q = null;
            for (Node p; (p = nextNode) != null; advance(q = p))
                action.accept(nextItem);
            if (q != null)
                lastRet = q;  // allow remove() of last accepted element
        }

        public final void remove() {
            final Node lastRet = this.lastRet;
            if (lastRet == null)
                throw new IllegalStateException();
            this.lastRet = null;
            if (lastRet.item == null)   // already deleted?
                return;
            // Advance ancestor, collapsing intervening dead nodes
            Node pred = ancestor;
            for (Node p = (pred == null) ? head : pred.next, c = p, q;
                 p != null; ) {
                if (p == lastRet) {
                    // Found target: match (logically delete) it, then try
                    // to unlink it, remembering pred for next remove().
                    final Object item;
                    if ((item = p.item) != null)
                        p.tryMatch(item, null);
                    if ((q = p.next) == null) q = p;
                    if (c != q) tryCasSuccessor(pred, c, q);
                    ancestor = pred;
                    return;
                }
                final Object item; final boolean pAlive;
                if (pAlive = ((item = p.item) != null && p.isData)) {
                    // exceptionally, nothing to do
                }
                else if (!p.isData && item == null)
                    break;
                if ((c != p && !tryCasSuccessor(pred, c, c = p)) || pAlive) {
                    pred = p;
                    c = p = p.next;
                }
                else if (p == (p = p.next)) {
                    pred = null;
                    c = p = head;
                }
            }
            // traversal failed to find lastRet; must have been deleted;
            // leave ancestor at original location to avoid overshoot;
            // better luck next time!

            // assert lastRet.isMatched();
        }
    }
939
    /** A customized variant of Spliterators.IteratorSpliterator */
    final class LTQSpliterator implements Spliterator<E> {
        static final int MAX_BATCH = 1 << 25;  // max batch array size;
        Node current;       // current node; null until initialized
        int batch;          // batch size for splits
        boolean exhausted;  // true when no more nodes
        LTQSpliterator() {}

        public Spliterator<E> trySplit() {
            Node p, q;
            if ((p = current()) == null || (q = p.next) == null)
                return null;
            // Batch grows by one on each split, up to MAX_BATCH, as in
            // Spliterators.IteratorSpliterator.
            int i = 0, n = batch = Math.min(batch + 1, MAX_BATCH);
            Object[] a = null;
            do {
                final Object item = p.item;
                if (p.isData) {
                    if (item != null) {          // collect live data item
                        if (a == null)
                            a = new Object[n];
                        a[i++] = item;
                    }
                } else if (item == null) {       // request node: end of data
                    p = null;
                    break;
                }
                if (p == (p = q))                // stale (self-linked) node
                    p = firstDataNode();
            } while (p != null && (q = p.next) != null && i < n);
            setCurrent(p);
            // The prefix batch is handed out as an array-backed spliterator.
            return (i == 0) ? null :
                Spliterators.spliterator(a, 0, i, (Spliterator.ORDERED |
                                                   Spliterator.NONNULL |
                                                   Spliterator.CONCURRENT));
        }

        public void forEachRemaining(Consumer<? super E> action) {
            Objects.requireNonNull(action);
            final Node p;
            if ((p = current()) != null) {
                current = null;
                exhausted = true;
                forEachFrom(action, p);
            }
        }

        @SuppressWarnings("unchecked")
        public boolean tryAdvance(Consumer<? super E> action) {
            Objects.requireNonNull(action);
            Node p;
            if ((p = current()) != null) {
                E e = null;
                do {
                    final Object item = p.item;
                    final boolean isData = p.isData;
                    if (p == (p = p.next))       // stale: restart from head
                        p = head;
                    if (isData) {
                        if (item != null) {      // found a live data item
                            e = (E) item;
                            break;
                        }
                    }
                    else if (item == null)       // request node: exhausted
                        p = null;
                } while (p != null);
                setCurrent(p);
                if (e != null) {
                    // Accept outside the traversal loop.
                    action.accept(e);
                    return true;
                }
            }
            return false;
        }

        private void setCurrent(Node p) {
            if ((current = p) == null)
                exhausted = true;
        }

        private Node current() {
            Node p;
            if ((p = current) == null && !exhausted)
                setCurrent(p = firstDataNode());
            return p;
        }

        public long estimateSize() { return Long.MAX_VALUE; }

        public int characteristics() {
            return (Spliterator.ORDERED |
                    Spliterator.NONNULL |
                    Spliterator.CONCURRENT);
        }
    }
1035
    /**
     * Returns a {@link Spliterator} over the elements in this queue.
     *
     * <p>The returned spliterator is
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * <p>The {@code Spliterator} reports {@link Spliterator#CONCURRENT},
     * {@link Spliterator#ORDERED}, and {@link Spliterator#NONNULL}.
     *
     * @implNote
     * The {@code Spliterator} implements {@code trySplit} to permit limited
     * parallelism.
     *
     * @return a {@code Spliterator} over the elements in this queue
     * @since 1.8
     */
    public Spliterator<E> spliterator() {
        return new LTQSpliterator();
    }
1055
1056 /* -------------- Removal methods -------------- */
1057
    /**
     * Unsplices (now or later) the given deleted/cancelled node with
     * the given predecessor.
     *
     * @param pred a node that was at one time known to be the
     * predecessor of s
     * @param s the node to be unspliced
     */
    final void unsplice(Node pred, Node s) {
        // assert pred != null;
        // assert pred != s;
        // assert s != null;
        // assert s.isMatched();
        // assert (SWEEP_THRESHOLD & (SWEEP_THRESHOLD - 1)) == 0;
        s.waiter = null; // disable signals
        /*
         * See above for rationale. Briefly: if pred still points to
         * s, try to unlink s. If s cannot be unlinked, because it is
         * trailing node or pred might be unlinked, and neither pred
         * nor s are head or offlist, set needSweep;
         */
        if (pred != null && pred.next == s) {
            Node n = s.next;
            if (n == null ||
                (n != s && pred.casNext(s, n) && pred.isMatched())) {
                for (;;) { // check if at, or could be, head
                    Node h = head;
                    if (h == pred || h == s)
                        return;          // at head or list empty
                    if (!h.isMatched())
                        break;
                    Node hn = h.next;
                    if (hn == null)
                        return;          // now empty
                    if (hn != h && casHead(h, hn))
                        h.selfLink();    // advance head
                }
                // Neither pred nor s is head or off-list: defer cleanup
                // to a later sweep() pass.
                if (pred.next != pred && s.next != s)
                    needSweep = true;
            }
        }
    }
1100
    /**
     * Unlinks matched (typically cancelled) nodes encountered in a
     * traversal from head.
     */
    private void sweep() {
        needSweep = false;
        for (Node p = head, s, n; p != null && (s = p.next) != null; ) {
            if (!s.isMatched())
                // Unmatched nodes are never self-linked
                p = s;
            else if ((n = s.next) == null) // trailing node is pinned
                break;
            else if (s == n)    // stale
                // No need to also check for p == s, since that implies s == n
                p = head;
            else
                p.casNext(s, n); // unlink s; on CAS failure just retry from p
        }
    }
1120
    /**
     * Creates an initially empty {@code LinkedTransferQueue}.
     */
    public LinkedTransferQueue() {
        head = tail = new Node(); // dummy node; list is never null-headed
    }
1127
1128 /**
1129 * Creates a {@code LinkedTransferQueue}
1130 * initially containing the elements of the given collection,
1131 * added in traversal order of the collection's iterator.
1132 *
1133 * @param c the collection of elements to initially contain
1134 * @throws NullPointerException if the specified collection or any
1135 * of its elements are null
1136 */
1137 public LinkedTransferQueue(Collection<? extends E> c) {
1138 Node h = null, t = null;
1139 for (E e : c) {
1140 Node newNode = new Node(Objects.requireNonNull(e));
1141 if (h == null)
1142 h = t = newNode;
1143 else
1144 t.appendRelaxed(t = newNode);
1145 }
1146 if (h == null)
1147 h = t = new Node();
1148 head = h;
1149 tail = t;
1150 }
1151
    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never block.
     *
     * @throws NullPointerException if the specified element is null
     */
    public void put(E e) {
        xfer(e, true, ASYNC, 0L); // ASYNC: enqueue without waiting
    }
1161
    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never block or
     * return {@code false}.
     *
     * @return {@code true} (as specified by
     *  {@link BlockingQueue#offer(Object,long,TimeUnit) BlockingQueue.offer})
     * @throws NullPointerException if the specified element is null
     */
    public boolean offer(E e, long timeout, TimeUnit unit) {
        xfer(e, true, ASYNC, 0L); // unbounded queue: timeout is irrelevant
        return true;
    }
1175
    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never return {@code false}.
     *
     * @return {@code true} (as specified by {@link Queue#offer})
     * @throws NullPointerException if the specified element is null
     */
    public boolean offer(E e) {
        xfer(e, true, ASYNC, 0L);
        return true;
    }
1187
    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never throw
     * {@link IllegalStateException} or return {@code false}.
     *
     * @return {@code true} (as specified by {@link Collection#add})
     * @throws NullPointerException if the specified element is null
     */
    public boolean add(E e) {
        xfer(e, true, ASYNC, 0L);
        return true;
    }
1200
    /**
     * Transfers the element to a waiting consumer immediately, if possible.
     *
     * <p>More precisely, transfers the specified element immediately
     * if there exists a consumer already waiting to receive it (in
     * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
     * otherwise returning {@code false} without enqueuing the element.
     *
     * @throws NullPointerException if the specified element is null
     */
    public boolean tryTransfer(E e) {
        // NOW mode: null reply from xfer means a consumer took the element.
        return xfer(e, true, NOW, 0L) == null;
    }
1214
    /**
     * Transfers the element to a consumer, waiting if necessary to do so.
     *
     * <p>More precisely, transfers the specified element immediately
     * if there exists a consumer already waiting to receive it (in
     * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
     * else inserts the specified element at the tail of this queue
     * and waits until the element is received by a consumer.
     *
     * @throws NullPointerException if the specified element is null
     */
    public void transfer(E e) throws InterruptedException {
        if (xfer(e, true, SYNC, 0L) != null) {
            Thread.interrupted(); // failure possible only due to interrupt
            throw new InterruptedException();
        }
    }
1232
1233 /**
1234 * Transfers the element to a consumer if it is possible to do so
1235 * before the timeout elapses.
1236 *
1237 * <p>More precisely, transfers the specified element immediately
1238 * if there exists a consumer already waiting to receive it (in
1239 * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
1240 * else inserts the specified element at the tail of this queue
1241 * and waits until the element is received by a consumer,
1242 * returning {@code false} if the specified wait time elapses
1243 * before the element can be transferred.
1244 *
1245 * @throws NullPointerException if the specified element is null
1246 */
1247 public boolean tryTransfer(E e, long timeout, TimeUnit unit)
1248 throws InterruptedException {
1249 if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null)
1250 return true;
1251 if (!Thread.interrupted())
1252 return false;
1253 throw new InterruptedException();
1254 }
1255
1256 public E take() throws InterruptedException {
1257 E e = xfer(null, false, SYNC, 0L);
1258 if (e != null)
1259 return e;
1260 Thread.interrupted();
1261 throw new InterruptedException();
1262 }
1263
1264 public E poll(long timeout, TimeUnit unit) throws InterruptedException {
1265 E e = xfer(null, false, TIMED, unit.toNanos(timeout));
1266 if (e != null || !Thread.interrupted())
1267 return e;
1268 throw new InterruptedException();
1269 }
1270
    /**
     * Retrieves and removes the head of this queue, or returns
     * {@code null} if this queue is empty.
     */
    public E poll() {
        return xfer(null, false, NOW, 0L); // NOW: never waits
    }
1274
1275 /**
1276 * @throws NullPointerException {@inheritDoc}
1277 * @throws IllegalArgumentException {@inheritDoc}
1278 */
1279 public int drainTo(Collection<? super E> c) {
1280 Objects.requireNonNull(c);
1281 if (c == this)
1282 throw new IllegalArgumentException();
1283 int n = 0;
1284 for (E e; (e = poll()) != null; n++)
1285 c.add(e);
1286 return n;
1287 }
1288
1289 /**
1290 * @throws NullPointerException {@inheritDoc}
1291 * @throws IllegalArgumentException {@inheritDoc}
1292 */
1293 public int drainTo(Collection<? super E> c, int maxElements) {
1294 Objects.requireNonNull(c);
1295 if (c == this)
1296 throw new IllegalArgumentException();
1297 int n = 0;
1298 for (E e; n < maxElements && (e = poll()) != null; n++)
1299 c.add(e);
1300 return n;
1301 }
1302
    /**
     * Returns an iterator over the elements in this queue in proper sequence.
     * The elements will be returned in order from first (head) to last (tail).
     *
     * <p>The returned iterator is
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * @return an iterator over the elements in this queue in proper sequence
     */
    public Iterator<E> iterator() {
        return new Itr();
    }
1315
    /**
     * Retrieves, but does not remove, the head of this queue, or
     * returns {@code null} if this queue is empty.
     */
    public E peek() {
        restartFromHead: for (;;) {
            for (Node p = head; p != null;) {
                Object item = p.item;
                if (p.isData) {
                    if (item != null) {          // first live data item
                        @SuppressWarnings("unchecked") E e = (E) item;
                        return e;
                    }
                }
                else if (item == null)           // unmatched request node:
                    break;                       // queue holds no data
                if (p == (p = p.next))           // stale (self-linked) node
                    continue restartFromHead;
            }
            return null;
        }
    }
1334
    /**
     * Returns {@code true} if this queue contains no elements.
     *
     * @return {@code true} if this queue contains no elements
     */
    public boolean isEmpty() {
        return firstDataNode() == null;
    }
1343
    /**
     * Returns {@code true} if there is at least one consumer waiting
     * to receive an element (an unmatched request node at the head).
     */
    public boolean hasWaitingConsumer() {
        restartFromHead: for (;;) {
            for (Node p = head; p != null;) {
                Object item = p.item;
                if (p.isData) {
                    if (item != null)            // live data node: no
                        break;                   // consumer can be waiting
                }
                else if (item == null)           // unmatched request node
                    return true;
                if (p == (p = p.next))           // stale: restart traversal
                    continue restartFromHead;
            }
            return false;
        }
    }
1360
    /**
     * Returns the number of elements in this queue. If this queue
     * contains more than {@code Integer.MAX_VALUE} elements, returns
     * {@code Integer.MAX_VALUE}.
     *
     * <p>Beware that, unlike in most collections, this method is
     * <em>NOT</em> a constant-time operation. Because of the
     * asynchronous nature of these queues, determining the current
     * number of elements requires an O(n) traversal.
     *
     * @return the number of elements in this queue
     */
    public int size() {
        return countOfMode(true); // true: count data nodes
    }
1376
    /**
     * Returns an estimate of the number of consumers waiting to
     * receive elements.
     */
    public int getWaitingConsumerCount() {
        return countOfMode(false); // false: count request nodes
    }
1380
    /**
     * Removes a single instance of the specified element from this queue,
     * if it is present. More formally, removes an element {@code e} such
     * that {@code o.equals(e)}, if this queue contains one or more such
     * elements.
     * Returns {@code true} if this queue contained the specified element
     * (or equivalently, if this queue changed as a result of the call).
     *
     * @param o element to be removed from this queue, if present
     * @return {@code true} if this queue changed as a result of the call
     */
    public boolean remove(Object o) {
        if (o == null) return false;
        restartFromHead: for (;;) {
            for (Node p = head, pred = null; p != null; ) {
                Node q = p.next;
                final Object item;
                if ((item = p.item) != null) {
                    if (p.isData) {
                        // Live data node: tryMatch both claims and
                        // logically deletes it in one CAS.
                        if (o.equals(item) && p.tryMatch(item, null)) {
                            skipDeadNodes(pred, p, p, q);
                            return true;
                        }
                        pred = p; p = q; continue;
                    }
                }
                else if (!p.isData)          // unmatched request node:
                    break;                   // no data at or after here
                // p is dead (matched): skip the whole run of dead nodes
                // starting at c, collapsing them out of the list.
                for (Node c = p;; q = p.next) {
                    if (q == null || !q.isMatched()) {
                        pred = skipDeadNodes(pred, c, p, q); p = q; break;
                    }
                    if (p == (p = q)) continue restartFromHead;
                }
            }
            return false;
        }
    }
1419
    /**
     * Returns {@code true} if this queue contains the specified element.
     * More formally, returns {@code true} if and only if this queue contains
     * at least one element {@code e} such that {@code o.equals(e)}.
     *
     * @param o object to be checked for containment in this queue
     * @return {@code true} if this queue contains the specified element
     */
    public boolean contains(Object o) {
        if (o == null) return false;
        restartFromHead: for (;;) {
            for (Node p = head, pred = null; p != null; ) {
                Node q = p.next;
                final Object item;
                if ((item = p.item) != null) {
                    if (p.isData) {
                        if (o.equals(item))
                            return true;
                        pred = p; p = q; continue;
                    }
                }
                else if (!p.isData)          // unmatched request node:
                    break;                   // no data at or after here
                // p is dead (matched): skip and collapse the run of dead
                // nodes starting at c, as in remove(Object).
                for (Node c = p;; q = p.next) {
                    if (q == null || !q.isMatched()) {
                        pred = skipDeadNodes(pred, c, p, q); p = q; break;
                    }
                    if (p == (p = q)) continue restartFromHead;
                }
            }
            return false;
        }
    }
1453
    /**
     * Always returns {@code Integer.MAX_VALUE} because a
     * {@code LinkedTransferQueue} is not capacity constrained.
     *
     * @return {@code Integer.MAX_VALUE} (as specified by
     *         {@link BlockingQueue#remainingCapacity()})
     */
    public int remainingCapacity() {
        return Integer.MAX_VALUE;
    }
1464
    /**
     * Saves this queue to a stream (that is, serializes it).
     *
     * @param s the stream
     * @throws java.io.IOException if an I/O error occurs
     * @serialData All of the elements (each an {@code E}) in
     * the proper order, followed by a null
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        s.defaultWriteObject();
        for (E e : this)
            s.writeObject(e);
        // Use trailing null as sentinel
        s.writeObject(null);
    }
1481
    /**
     * Reconstitutes this queue from a stream (that is, deserializes it).
     * @param s the stream
     * @throws ClassNotFoundException if the class of a serialized object
     *         could not be found
     * @throws java.io.IOException if an I/O error occurs
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {

        // Read in elements until trailing null sentinel found
        Node h = null, t = null;
        for (Object item; (item = s.readObject()) != null; ) {
            Node newNode = new Node(item);
            if (h == null)
                h = t = newNode;
            else
                t.appendRelaxed(t = newNode); // relaxed: not yet published
        }
        if (h == null)
            h = t = new Node(); // empty stream: install dummy node
        head = h;
        tail = t;
    }
1506
    /**
     * @throws NullPointerException {@inheritDoc}
     */
    public boolean removeIf(Predicate<? super E> filter) {
        Objects.requireNonNull(filter);
        return bulkRemove(filter);
    }
1514
1515 /**
1516 * @throws NullPointerException {@inheritDoc}
1517 */
1518 public boolean removeAll(Collection<?> c) {
1519 Objects.requireNonNull(c);
1520 return bulkRemove(e -> c.contains(e));
1521 }
1522
    /**
     * @throws NullPointerException {@inheritDoc}
     */
    public boolean retainAll(Collection<?> c) {
        Objects.requireNonNull(c);
        return bulkRemove(e -> !c.contains(e)); // remove everything NOT in c
    }
1530
    /**
     * Removes all of the elements from this queue.
     */
    public void clear() {
        bulkRemove(e -> true); // remove unconditionally
    }
1534
    /**
     * Tolerate this many consecutive dead nodes before CAS-collapsing.
     * Amortized cost of clear() is (1 + 1/MAX_HOPS) CASes per element.
     */
    private static final int MAX_HOPS = 8;
1540
    /**
     * Implementation of bulk remove methods.
     *
     * @param filter predicate selecting elements to remove
     * @return true if any element was removed
     */
    @SuppressWarnings("unchecked")
    private boolean bulkRemove(Predicate<? super E> filter) {
        boolean removed = false;
        restartFromHead: for (;;) {
            int hops = MAX_HOPS;
            // c will be CASed to collapse intervening dead nodes between
            // pred (or head if null) and p.
            for (Node p = head, c = p, pred = null, q; p != null; p = q) {
                q = p.next;
                final Object item; boolean pAlive;
                if (pAlive = ((item = p.item) != null && p.isData)) {
                    if (filter.test((E) item)) {
                        // tryMatch both claims and deletes the node; a
                        // failed CAS means another thread removed it first.
                        if (p.tryMatch(item, null))
                            removed = true;
                        pAlive = false;
                    }
                }
                else if (!p.isData && item == null)
                    // Unmatched request node: no data at or after here.
                    break;
                if (pAlive || q == null || --hops == 0) {
                    // p might already be self-linked here, but if so:
                    // - CASing head will surely fail
                    // - CASing pred's next will be useless but harmless.
                    if ((c != p && !tryCasSuccessor(pred, c, c = p))
                        || pAlive) {
                        // if CAS failed or alive, abandon old pred
                        hops = MAX_HOPS;
                        pred = p;
                        c = q;
                    }
                } else if (p == q)
                    continue restartFromHead;
            }
            return removed;
        }
    }
1578
    /**
     * Runs action on each element found during a traversal starting at p.
     * If p is null, the action is not run.
     */
    @SuppressWarnings("unchecked")
    void forEachFrom(Consumer<? super E> action, Node p) {
        for (Node pred = null; p != null; ) {
            Node q = p.next;
            final Object item;
            if ((item = p.item) != null) {
                if (p.isData) {
                    action.accept((E) item);
                    pred = p; p = q; continue;
                }
            }
            else if (!p.isData)              // unmatched request node:
                break;                       // no data at or after here
            // p is dead (matched): skip and collapse the run of dead
            // nodes starting at c.
            for (Node c = p;; q = p.next) {
                if (q == null || !q.isMatched()) {
                    pred = skipDeadNodes(pred, c, p, q); p = q; break;
                }
                if (p == (p = q)) { pred = null; p = head; break; }
            }
        }
    }
1604
    /**
     * @throws NullPointerException {@inheritDoc}
     */
    public void forEach(Consumer<? super E> action) {
        Objects.requireNonNull(action);
        forEachFrom(action, head);
    }
1612
    // VarHandle mechanics: atomic access to the queue's head/tail and to
    // each Node's item/next/waiter fields.
    private static final VarHandle HEAD;
    private static final VarHandle TAIL;
    static final VarHandle ITEM;
    static final VarHandle NEXT;
    static final VarHandle WAITER;
    static {
        try {
            MethodHandles.Lookup l = MethodHandles.lookup();
            HEAD = l.findVarHandle(LinkedTransferQueue.class, "head",
                                   Node.class);
            TAIL = l.findVarHandle(LinkedTransferQueue.class, "tail",
                                   Node.class);
            ITEM = l.findVarHandle(Node.class, "item", Object.class);
            NEXT = l.findVarHandle(Node.class, "next", Node.class);
            WAITER = l.findVarHandle(Node.class, "waiter", Thread.class);
        } catch (ReflectiveOperationException e) {
            throw new ExceptionInInitializerError(e);
        }

        // Reduce the risk of rare disastrous classloading in first call to
        // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
        Class<?> ensureLoaded = LockSupport.class;
    }
1637 }