root/jsr166/jsr166/src/main/java/util/concurrent/LinkedTransferQueue.java
Revision: 1.133
Committed: Mon Jan 2 00:10:14 2017 UTC by jsr166
Branch: MAIN
Changes since 1.132: +2 -0 lines
Log Message:
add assertions that we never cas-unlink the trailing node

1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/publicdomain/zero/1.0/
5 */
6
7 package java.util.concurrent;
8
9 import java.lang.invoke.MethodHandles;
10 import java.lang.invoke.VarHandle;
11 import java.util.AbstractQueue;
12 import java.util.Arrays;
13 import java.util.Collection;
14 import java.util.Iterator;
15 import java.util.NoSuchElementException;
16 import java.util.Objects;
17 import java.util.Queue;
18 import java.util.Spliterator;
19 import java.util.Spliterators;
20 import java.util.concurrent.locks.LockSupport;
21 import java.util.function.Consumer;
22 import java.util.function.Predicate;
23
24 /**
25 * An unbounded {@link TransferQueue} based on linked nodes.
26 * This queue orders elements FIFO (first-in-first-out) with respect
27 * to any given producer. The <em>head</em> of the queue is that
28 * element that has been on the queue the longest time for some
29 * producer. The <em>tail</em> of the queue is that element that has
30 * been on the queue the shortest time for some producer.
31 *
32 * <p>Beware that, unlike in most collections, the {@code size} method
33 * is <em>NOT</em> a constant-time operation. Because of the
34 * asynchronous nature of these queues, determining the current number
35 * of elements requires a traversal of the elements, and so may report
36 * inaccurate results if this collection is modified during traversal.
37 *
38 * <p>Bulk operations that add, remove, or examine multiple elements,
39 * such as {@link #addAll}, {@link #removeIf} or {@link #forEach},
40 * are <em>not</em> guaranteed to be performed atomically.
41 * For example, a {@code forEach} traversal concurrent with an {@code
42 * addAll} operation might observe only some of the added elements.
43 *
44 * <p>This class and its iterator implement all of the <em>optional</em>
45 * methods of the {@link Collection} and {@link Iterator} interfaces.
46 *
47 * <p>Memory consistency effects: As with other concurrent
48 * collections, actions in a thread prior to placing an object into a
49 * {@code LinkedTransferQueue}
50 * <a href="package-summary.html#MemoryVisibility"><i>happen-before</i></a>
51 * actions subsequent to the access or removal of that element from
52 * the {@code LinkedTransferQueue} in another thread.
53 *
54 * <p>This class is a member of the
55 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
56 * Java Collections Framework</a>.
57 *
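 * <p>As an illustrative sketch (not part of the specification), a
 * producer can hand an element directly to a consumer:
 *
 * <pre> {@code
 * LinkedTransferQueue<String> queue = new LinkedTransferQueue<>();
 *
 * // consumer: blocks until an element is available
 * Thread consumer = new Thread(() -> {
 *   try {
 *     System.out.println(queue.take());
 *   } catch (InterruptedException ex) {
 *     Thread.currentThread().interrupt();
 *   }
 * });
 * consumer.start();
 *
 * // producer: returns only after a consumer has received the element
 * queue.transfer("hello"); // throws InterruptedException if interrupted}</pre>
 *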
58 * @since 1.7
59 * @author Doug Lea
60 * @param <E> the type of elements held in this queue
61 */
62 public class LinkedTransferQueue<E> extends AbstractQueue<E>
63 implements TransferQueue<E>, java.io.Serializable {
64 private static final long serialVersionUID = -3223113410248163686L;
65
66 /*
67 * *** Overview of Dual Queues with Slack ***
68 *
69 * Dual Queues, introduced by Scherer and Scott
70 * (http://www.cs.rochester.edu/~scott/papers/2004_DISC_dual_DS.pdf)
71 * are (linked) queues in which nodes may represent either data or
72 * requests. When a thread tries to enqueue a data node, but
73 * encounters a request node, it instead "matches" and removes it;
74 * and vice versa for enqueuing requests. Blocking Dual Queues
75 * arrange that threads enqueuing unmatched requests block until
76 * other threads provide the match. Dual Synchronous Queues (see
77 * Scherer, Lea, & Scott
78 * http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf)
79 * additionally arrange that threads enqueuing unmatched data also
80 * block. Dual Transfer Queues support all of these modes, as
81 * dictated by callers.
82 *
83 * A FIFO dual queue may be implemented using a variation of the
84 * Michael & Scott (M&S) lock-free queue algorithm
85 * (http://www.cs.rochester.edu/~scott/papers/1996_PODC_queues.pdf).
86 * It maintains two pointer fields, "head", pointing to a
87 * (matched) node that in turn points to the first actual
88 * (unmatched) queue node (or null if empty); and "tail" that
89 * points to the last node on the queue (or again null if
90 * empty). For example, here is a possible queue with four data
91 * elements:
92 *
93 * head tail
94 * | |
95 * v v
96 * M -> U -> U -> U -> U
97 *
98 * The M&S queue algorithm is known to be prone to scalability and
99 * overhead limitations when maintaining (via CAS) these head and
100 * tail pointers. This has led to the development of
101 * contention-reducing variants such as elimination arrays (see
102 * Moir et al http://portal.acm.org/citation.cfm?id=1074013) and
103 * optimistic back pointers (see Ladan-Mozes & Shavit
104 * http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf).
105 * However, the nature of dual queues enables a simpler tactic for
106 * improving M&S-style implementations when dual-ness is needed.
107 *
108 * In a dual queue, each node must atomically maintain its match
109 * status. While there are other possible variants, we implement
110 * this here as: for a data-mode node, matching entails CASing an
111 * "item" field from a non-null data value to null upon match, and
112 * vice-versa for request nodes, CASing from null to a data
113 * value. (Note that the linearization properties of this style of
114 * queue are easy to verify -- elements are made available by
115 * linking, and unavailable by matching.) Compared to plain M&S
116 * queues, this property of dual queues requires one additional
117 * successful atomic operation per enq/deq pair. But it also
118 * enables lower cost variants of queue maintenance mechanics. (A
119 * variation of this idea applies even for non-dual queues that
120 * support deletion of interior elements, such as
121 * j.u.c.ConcurrentLinkedQueue.)
122 *
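     * Condensed to its core, in terms of the casItem helper defined
     * in Node below, the two matching CASes are:
     *
     *   dataNode.casItem(e, null);     // consumer takes a producer's item
     *   requestNode.casItem(null, e);  // producer satisfies a waiting request
     *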
123 * Once a node is matched, its match status can never again
124 * change. We may thus arrange that the linked list of them
125 * contain a prefix of zero or more matched nodes, followed by a
126 * suffix of zero or more unmatched nodes. (Note that we allow
127 * both the prefix and suffix to be zero length, which in turn
128 * means that we do not use a dummy header.) If we were not
129 * concerned with either time or space efficiency, we could
130 * correctly perform enqueue and dequeue operations by traversing
131 * from a pointer to the initial node; CASing the item of the
132 * first unmatched node on match and CASing the next field of the
133 * trailing node on appends. (Plus some special-casing when
134 * initially empty). While this would be a terrible idea in
135 * itself, it does have the benefit of not requiring ANY atomic
136 * updates on head/tail fields.
137 *
138 * We introduce here an approach that lies between the extremes of
139 * never versus always updating queue (head and tail) pointers.
140 * This offers a tradeoff between sometimes requiring extra
141 * traversal steps to locate the first and/or last unmatched
142 * nodes, versus the reduced overhead and contention of fewer
143 * updates to queue pointers. For example, a possible snapshot of
144 * a queue is:
145 *
146 * head tail
147 * | |
148 * v v
149 * M -> M -> U -> U -> U -> U
150 *
151 * The best value for this "slack" (the targeted maximum distance
152 * between the value of "head" and the first unmatched node, and
153 * similarly for "tail") is an empirical matter. We have found
 154 * that using very small constants in the range of 1-3 works best
155 * over a range of platforms. Larger values introduce increasing
156 * costs of cache misses and risks of long traversal chains, while
157 * smaller values increase CAS contention and overhead.
158 *
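     * For instance, with a slack target of two, a queue that has
     * drifted to
     *
     *   head
     *    |
     *    v
     *    M -> M -> M -> U -> U
     *
     * has slack three, so some thread tries to CAS head two nodes
     * forward, restoring slack one (the old head then self-links,
     * as described below).
     *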
159 * Dual queues with slack differ from plain M&S dual queues by
160 * virtue of only sometimes updating head or tail pointers when
 161 * matching, appending, or even traversing nodes, in order to
162 * maintain a targeted slack. The idea of "sometimes" may be
163 * operationalized in several ways. The simplest is to use a
164 * per-operation counter incremented on each traversal step, and
165 * to try (via CAS) to update the associated queue pointer
166 * whenever the count exceeds a threshold. Another, that requires
167 * more overhead, is to use random number generators to update
168 * with a given probability per traversal step.
169 *
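     * In counter form, the simple variant reads roughly as follows
     * (a sketch of the general idea only; as noted below, this file
     * instead detects excess slack structurally):
     *
     *   if (++steps > SLACK)       // count traversal steps
     *       casHead(h, p);         // best effort; failure is tolerable
     *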
170 * In any strategy along these lines, because CASes updating
171 * fields may fail, the actual slack may exceed targeted slack.
172 * However, they may be retried at any time to maintain targets.
173 * Even when using very small slack values, this approach works
174 * well for dual queues because it allows all operations up to the
175 * point of matching or appending an item (hence potentially
176 * allowing progress by another thread) to be read-only, thus not
177 * introducing any further contention. As described below, we
178 * implement this by performing slack maintenance retries only
179 * after these points.
180 *
181 * As an accompaniment to such techniques, traversal overhead can
182 * be further reduced without increasing contention of head
183 * pointer updates: Threads may sometimes shortcut the "next" link
184 * path from the current "head" node to be closer to the currently
185 * known first unmatched node, and similarly for tail. Again, this
 186 * may be triggered using thresholds or randomization.
187 *
188 * These ideas must be further extended to avoid unbounded amounts
189 * of costly-to-reclaim garbage caused by the sequential "next"
190 * links of nodes starting at old forgotten head nodes: As first
191 * described in detail by Boehm
192 * (http://portal.acm.org/citation.cfm?doid=503272.503282), if a GC
193 * delays noticing that any arbitrarily old node has become
194 * garbage, all newer dead nodes will also be unreclaimed.
195 * (Similar issues arise in non-GC environments.) To cope with
196 * this in our implementation, upon CASing to advance the head
197 * pointer, we set the "next" link of the previous head to point
198 * only to itself; thus limiting the length of chains of dead nodes.
199 * (We also take similar care to wipe out possibly garbage
200 * retaining values held in other Node fields.) However, doing so
201 * adds some further complexity to traversal: If any "next"
202 * pointer links to itself, it indicates that the current thread
203 * has lagged behind a head-update, and so the traversal must
204 * continue from the "head". Traversals trying to find the
205 * current tail starting from "tail" may also encounter
206 * self-links, in which case they also continue at "head".
207 *
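     * Traversal loops below therefore use the idiom (as in xfer)
     *
     *   Node n = p.next;
     *   p = (p != n) ? n : head;   // self-linked: restart at head
     *
     * or the equivalent "p == (p = p.next)" test to detect having
     * lagged behind a head update.
     *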
 208 * It is tempting in a slack-based scheme to not even use CAS for
209 * updates (similarly to Ladan-Mozes & Shavit). However, this
210 * cannot be done for head updates under the above link-forgetting
211 * mechanics because an update may leave head at a detached node.
212 * And while direct writes are possible for tail updates, they
213 * increase the risk of long retraversals, and hence long garbage
214 * chains, which can be much more costly than is worthwhile
215 * considering that the cost difference of performing a CAS vs
216 * write is smaller when they are not triggered on each operation
217 * (especially considering that writes and CASes equally require
218 * additional GC bookkeeping ("write barriers") that are sometimes
219 * more costly than the writes themselves because of contention).
220 *
221 * *** Overview of implementation ***
222 *
223 * We use a threshold-based approach to updates, with a slack
224 * threshold of two -- that is, we update head/tail when the
225 * current pointer appears to be two or more steps away from the
226 * first/last node. The slack value is hard-wired: a path greater
227 * than one is naturally implemented by checking equality of
228 * traversal pointers except when the list has only one element,
 229 * in which case we keep the slack threshold at one. Avoiding tracking
230 * explicit counts across method calls slightly simplifies an
231 * already-messy implementation. Using randomization would
232 * probably work better if there were a low-quality dirt-cheap
233 * per-thread one available, but even ThreadLocalRandom is too
234 * heavy for these purposes.
235 *
236 * With such a small slack threshold value, it is not worthwhile
237 * to augment this with path short-circuiting (i.e., unsplicing
238 * interior nodes) except in the case of cancellation/removal (see
239 * below).
240 *
241 * We allow both the head and tail fields to be null before any
242 * nodes are enqueued; initializing upon first append. This
243 * simplifies some other logic, as well as providing more
244 * efficient explicit control paths instead of letting JVMs insert
245 * implicit NullPointerExceptions when they are null. While not
246 * currently fully implemented, we also leave open the possibility
247 * of re-nulling these fields when empty (which is complicated to
248 * arrange, for little benefit.)
249 *
250 * All enqueue/dequeue operations are handled by the single method
251 * "xfer" with parameters indicating whether to act as some form
252 * of offer, put, poll, take, or transfer (each possibly with
 253 * timeout). The relative complexity of using one monolithic
 254 * method is outweighed by the code bulk and maintenance
 255 * problems of using separate methods for each case.
256 *
257 * Operation consists of up to three phases. The first is
258 * implemented within method xfer, the second in tryAppend, and
259 * the third in method awaitMatch.
260 *
261 * 1. Try to match an existing node
262 *
 263 * Starting at head, skip already-matched nodes until finding
 264 * an unmatched node of opposite mode, if one exists; match it
 265 * and return, updating head if necessary to one past the
 266 * matched node (or to the node itself if the list has no
 267 * other unmatched nodes). If the CAS misses, then
268 * a loop retries advancing head by two steps until either
269 * success or the slack is at most two. By requiring that each
270 * attempt advances head by two (if applicable), we ensure that
271 * the slack does not grow without bound. Traversals also check
272 * if the initial head is now off-list, in which case they
273 * restart at the new head.
274 *
275 * If no candidates are found and the call was untimed
276 * poll/offer (argument "how" is NOW), return.
277 *
278 * 2. Try to append a new node (method tryAppend)
279 *
280 * Starting at current tail pointer, find the actual last node
281 * and try to append a new node (or if head was null, establish
282 * the first node). Nodes can be appended only if their
283 * predecessors are either already matched or are of the same
284 * mode. If we detect otherwise, then a new node with opposite
285 * mode must have been appended during traversal, so we must
286 * restart at phase 1. The traversal and update steps are
287 * otherwise similar to phase 1: Retrying upon CAS misses and
288 * checking for staleness. In particular, if a self-link is
289 * encountered, then we can safely jump to a node on the list
290 * by continuing the traversal at current head.
291 *
292 * On successful append, if the call was ASYNC, return.
293 *
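     *    The append itself is a single CAS of the last node's next
     *    field, as in tryAppend below:
     *
     *      if (!p.casNext(null, s))
     *          p = p.next;   // lost an append race; re-read and retry
     *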
294 * 3. Await match or cancellation (method awaitMatch)
295 *
 296 * Wait for another thread to match the node, cancelling instead if
297 * the current thread was interrupted or the wait timed out. On
298 * multiprocessors, we use front-of-queue spinning: If a node
299 * appears to be the first unmatched node in the queue, it
300 * spins a bit before blocking. In either case, before blocking
301 * it tries to unsplice any nodes between the current "head"
302 * and the first unmatched node.
303 *
304 * Front-of-queue spinning vastly improves performance of
305 * heavily contended queues. And so long as it is relatively
306 * brief and "quiet", spinning does not much impact performance
307 * of less-contended queues. During spins threads check their
308 * interrupt status and generate a thread-local random number
309 * to decide to occasionally perform a Thread.yield. While
310 * yield has underdefined specs, we assume that it might help,
311 * and will not hurt, in limiting impact of spinning on busy
312 * systems. We also use smaller (1/2) spins for nodes that are
313 * not known to be front but whose predecessors have not
314 * blocked -- these "chained" spins avoid artifacts of
315 * front-of-queue rules which otherwise lead to alternating
316 * nodes spinning vs blocking. Further, front threads that
317 * represent phase changes (from data to request node or vice
318 * versa) compared to their predecessors receive additional
319 * chained spins, reflecting longer paths typically required to
320 * unblock threads during phase changes.
321 *
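     * In awaitMatch below, one spin step therefore takes roughly
     * the form
     *
     *   --spins;
     *   if (randomYields.nextInt(CHAINED_SPINS) == 0)
     *       Thread.yield();    // occasionally yield while spinning
     *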
322 *
323 * ** Unlinking removed interior nodes **
324 *
325 * In addition to minimizing garbage retention via self-linking
326 * described above, we also unlink removed interior nodes. These
327 * may arise due to timed out or interrupted waits, or calls to
328 * remove(x) or Iterator.remove. Normally, given a node that was
329 * at one time known to be the predecessor of some node s that is
330 * to be removed, we can unsplice s by CASing the next field of
331 * its predecessor if it still points to s (otherwise s must
332 * already have been removed or is now offlist). But there are two
333 * situations in which we cannot guarantee to make node s
334 * unreachable in this way: (1) If s is the trailing node of list
335 * (i.e., with null next), then it is pinned as the target node
336 * for appends, so can only be removed later after other nodes are
337 * appended. (2) We cannot necessarily unlink s given a
338 * predecessor node that is matched (including the case of being
339 * cancelled): the predecessor may already be unspliced, in which
340 * case some previous reachable node may still point to s.
341 * (For further explanation see Herlihy & Shavit "The Art of
 342 * Multiprocessor Programming" chapter 9). However, in both
343 * cases, we can rule out the need for further action if either s
344 * or its predecessor are (or can be made to be) at, or fall off
345 * from, the head of list.
346 *
347 * Without taking these into account, it would be possible for an
348 * unbounded number of supposedly removed nodes to remain
349 * reachable. Situations leading to such buildup are uncommon but
350 * can occur in practice; for example when a series of short timed
351 * calls to poll repeatedly time out but never otherwise fall off
352 * the list because of an untimed call to take at the front of the
353 * queue.
354 *
355 * When these cases arise, rather than always retraversing the
356 * entire list to find an actual predecessor to unlink (which
357 * won't help for case (1) anyway), we record a conservative
358 * estimate of possible unsplice failures (in "sweepVotes").
359 * We trigger a full sweep when the estimate exceeds a threshold
360 * ("SWEEP_THRESHOLD") indicating the maximum number of estimated
361 * removal failures to tolerate before sweeping through, unlinking
362 * cancelled nodes that were not unlinked upon initial removal.
363 * We perform sweeps by the thread hitting threshold (rather than
364 * background threads or by spreading work to other threads)
365 * because in the main contexts in which removal occurs, the
366 * caller is already timed-out, cancelled, or performing a
367 * potentially O(n) operation (e.g. remove(x)), none of which are
368 * time-critical enough to warrant the overhead that alternatives
369 * would impose on other threads.
370 *
371 * Because the sweepVotes estimate is conservative, and because
372 * nodes become unlinked "naturally" as they fall off the head of
373 * the queue, and because we allow votes to accumulate even while
374 * sweeps are in progress, there are typically significantly fewer
375 * such nodes than estimated. Choice of a threshold value
376 * balances the likelihood of wasted effort and contention, versus
377 * providing a worst-case bound on retention of interior nodes in
378 * quiescent queues. The value defined below was chosen
379 * empirically to balance these under various timeout scenarios.
380 *
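     * In outline (the retry loop in unsplice below is elided), the
     * vote/sweep step is:
     *
     *   int v = sweepVotes;
     *   if (v < SWEEP_THRESHOLD)
     *       casSweepVotes(v, v + 1);     // accumulate a vote
     *   else if (casSweepVotes(v, 0))
     *       sweep();                     // hit threshold: sweep now
     *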
381 * Note that we cannot self-link unlinked interior nodes during
382 * sweeps. However, the associated garbage chains terminate when
383 * some successor ultimately falls off the head of the list and is
384 * self-linked.
385 */
386
387 /** True if on multiprocessor */
388 private static final boolean MP =
389 Runtime.getRuntime().availableProcessors() > 1;
390
391 /**
392 * The number of times to spin (with randomly interspersed calls
393 * to Thread.yield) on multiprocessor before blocking when a node
394 * is apparently the first waiter in the queue. See above for
395 * explanation. Must be a power of two. The value is empirically
396 * derived -- it works pretty well across a variety of processors,
397 * numbers of CPUs, and OSes.
398 */
399 private static final int FRONT_SPINS = 1 << 7;
400
401 /**
402 * The number of times to spin before blocking when a node is
403 * preceded by another node that is apparently spinning. Also
404 * serves as an increment to FRONT_SPINS on phase changes, and as
405 * base average frequency for yielding during spins. Must be a
406 * power of two.
407 */
408 private static final int CHAINED_SPINS = FRONT_SPINS >>> 1;
409
410 /**
411 * The maximum number of estimated removal failures (sweepVotes)
412 * to tolerate before sweeping through the queue unlinking
413 * cancelled nodes that were not unlinked upon initial
414 * removal. See above for explanation. The value must be at least
415 * two to avoid useless sweeps when removing trailing nodes.
416 */
417 static final int SWEEP_THRESHOLD = 32;
418
419 /**
420 * Queue nodes. Uses Object, not E, for items to allow forgetting
421 * them after use. Relies heavily on VarHandles to minimize
422 * unnecessary ordering constraints: Writes that are intrinsically
423 * ordered wrt other accesses or CASes use simple relaxed forms.
424 */
425 static final class Node {
426 final boolean isData; // false if this is a request node
427 volatile Object item; // initially non-null if isData; CASed to match
428 volatile Node next;
429 volatile Thread waiter; // null until waiting
430
431 final boolean casNext(Node cmp, Node val) {
432 return NEXT.compareAndSet(this, cmp, val);
433 }
434
435 final boolean casItem(Object cmp, Object val) {
436 // assert isData == (cmp != null);
437 // assert isData == (val == null);
438 // assert !(cmp instanceof Node);
439 return ITEM.compareAndSet(this, cmp, val);
440 }
441
442 /**
443 * Constructs a new node. Uses relaxed write because item can
444 * only be seen after publication via casNext.
445 */
446 Node(Object item) {
447 ITEM.set(this, item);
448 isData = (item != null);
449 }
450
451 /**
452 * Links node to itself to avoid garbage retention. Called
453 * only after CASing head field, so uses relaxed write.
454 */
455 final void forgetNext() {
456 NEXT.setRelease(this, this);
457 }
458
459 /**
460 * Sets item (of a request node) to self and waiter to null,
461 * to avoid garbage retention after matching or cancelling.
462 * Uses relaxed writes because order is already constrained in
463 * the only calling contexts: item is forgotten only after
464 * volatile/atomic mechanics that extract items, and visitors
465 * of request nodes only ever check whether item is null.
466 * Similarly, clearing waiter follows either CAS or return
467 * from park (if ever parked; else we don't care).
468 */
469 final void forgetContents() {
470 // assert isMatched();
471 if (!isData)
472 ITEM.set(this, this);
473 WAITER.set(this, null);
474 }
475
476 /**
477 * Returns true if this node has been matched, including the
478 * case of artificial matches due to cancellation.
479 */
480 final boolean isMatched() {
481 return isData == (item == null);
482 }
483
484 /**
485 * Returns true if a node with the given mode cannot be
486 * appended to this node because this node is unmatched and
487 * has opposite data mode.
488 */
489 final boolean cannotPrecede(boolean haveData) {
490 boolean d = isData;
491 return d != haveData && d != (item == null);
492 }
493
494 /**
495 * Tries to artificially match a data node -- used by remove.
496 */
497 final boolean tryMatchData() {
498 // assert isData;
499 final Object x;
500 if ((x = item) != null && casItem(x, null)) {
501 LockSupport.unpark(waiter);
502 return true;
503 }
504 return false;
505 }
506
507 private static final long serialVersionUID = -3375979862319811754L;
508
509 // VarHandle mechanics
510 private static final VarHandle ITEM;
511 private static final VarHandle NEXT;
512 private static final VarHandle WAITER;
513 static {
514 try {
515 MethodHandles.Lookup l = MethodHandles.lookup();
516 ITEM = l.findVarHandle(Node.class, "item", Object.class);
517 NEXT = l.findVarHandle(Node.class, "next", Node.class);
518 WAITER = l.findVarHandle(Node.class, "waiter", Thread.class);
519 } catch (ReflectiveOperationException e) {
520 throw new Error(e);
521 }
522 }
523 }
524
525 /** head of the queue; null until first enqueue */
526 transient volatile Node head;
527
528 /** tail of the queue; null until first append */
529 private transient volatile Node tail;
530
531 /** The number of apparent failures to unsplice removed nodes */
532 private transient volatile int sweepVotes;
533
534 private boolean casTail(Node cmp, Node val) {
535 return TAIL.compareAndSet(this, cmp, val);
536 }
537
538 private boolean casHead(Node cmp, Node val) {
539 return HEAD.compareAndSet(this, cmp, val);
540 }
541
542 private boolean casSweepVotes(int cmp, int val) {
543 return SWEEPVOTES.compareAndSet(this, cmp, val);
544 }
545
546 /**
547 * Tries to CAS pred.next (or head, if pred is null) from c to p.
548 * Caller must ensure that we're not unlinking the trailing node.
549 */
550 private boolean tryCasSuccessor(Node pred, Node c, Node p) {
551 // assert p != null;
552 // assert c != p;
553 if (pred != null)
554 return pred.casNext(c, p);
555 if (casHead(c, p)) {
556 c.forgetNext();
557 return true;
558 }
559 return false;
560 }
561
562 /*
563 * Possible values for "how" argument in xfer method.
564 */
565 private static final int NOW = 0; // for untimed poll, tryTransfer
566 private static final int ASYNC = 1; // for offer, put, add
567 private static final int SYNC = 2; // for transfer, take
568 private static final int TIMED = 3; // for timed poll, tryTransfer
569
570 /**
571 * Implements all queuing methods. See above for explanation.
572 *
573 * @param e the item or null for take
574 * @param haveData true if this is a put, else a take
575 * @param how NOW, ASYNC, SYNC, or TIMED
576 * @param nanos timeout in nanosecs, used only if mode is TIMED
577 * @return an item if matched, else e
578 * @throws NullPointerException if haveData mode but e is null
579 */
580 private E xfer(E e, boolean haveData, int how, long nanos) {
581 if (haveData && (e == null))
582 throw new NullPointerException();
583 Node s = null; // the node to append, if needed
584
585 restartFromHead: for (;;) {
586 for (Node h = head, p = h; p != null;) { // find & match first node
587 boolean isData = p.isData;
588 Object item = p.item;
589 if ((item != null) == isData) { // unmatched
590 if (isData == haveData) // can't match
591 break;
592 if (p.casItem(item, e)) { // match
593 for (Node q = p; q != h;) {
594 Node n = q.next; // update by 2 unless singleton
595 if (head == h && casHead(h, n == null ? q : n)) {
596 h.forgetNext();
597 break;
598 } // advance and retry
599 if ((h = head) == null ||
600 (q = h.next) == null || !q.isMatched())
601 break; // unless slack < 2
602 }
603 LockSupport.unpark(p.waiter);
604 @SuppressWarnings("unchecked") E itemE = (E) item;
605 return itemE;
606 }
607 }
608 Node n = p.next;
609 p = (p != n) ? n : (h = head); // Use head if p offlist
610 }
611
612 if (how != NOW) { // No matches available
613 if (s == null)
614 s = new Node(e);
615 Node pred = tryAppend(s, haveData);
616 if (pred == null)
617 continue restartFromHead; // lost race vs opposite mode
618 if (how != ASYNC)
619 return awaitMatch(s, pred, e, (how == TIMED), nanos);
620 }
621 return e; // not waiting
622 }
623 }
624
625 /**
626 * Tries to append node s as tail.
627 *
628 * @param s the node to append
629 * @param haveData true if appending in data mode
630 * @return null on failure due to losing race with append in
631 * different mode, else s's predecessor, or s itself if no
632 * predecessor
633 */
634 private Node tryAppend(Node s, boolean haveData) {
635 for (Node t = tail, p = t;;) { // move p to last node and append
636 Node n, u; // temps for reads of next & tail
637 if (p == null && (p = head) == null) {
638 if (casHead(null, s))
639 return s; // initialize
640 }
641 else if (p.cannotPrecede(haveData))
642 return null; // lost race vs opposite mode
643 else if ((n = p.next) != null) // not last; keep traversing
644 p = p != t && t != (u = tail) ? (t = u) : // stale tail
645 (p != n) ? n : null; // restart if off list
646 else if (!p.casNext(null, s))
647 p = p.next; // re-read on CAS failure
648 else {
649 if (p != t) { // update if slack now >= 2
650 while ((tail != t || !casTail(t, s)) &&
651 (t = tail) != null &&
652 (s = t.next) != null && // advance and retry
653 (s = s.next) != null && s != t);
654 }
655 return p;
656 }
657 }
658 }
659
660 /**
661 * Spins/yields/blocks until node s is matched or caller gives up.
662 *
663 * @param s the waiting node
664 * @param pred the predecessor of s, or s itself if it has no
665 * predecessor, or null if unknown (the null case does not occur
666 * in any current calls but may in possible future extensions)
667 * @param e the comparison value for checking match
668 * @param timed if true, wait only until timeout elapses
669 * @param nanos timeout in nanosecs, used only if timed is true
670 * @return matched item, or e if unmatched on interrupt or timeout
671 */
672 private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) {
673 final long deadline = timed ? System.nanoTime() + nanos : 0L;
674 Thread w = Thread.currentThread();
675 int spins = -1; // initialized after first item and cancel checks
676 ThreadLocalRandom randomYields = null; // bound if needed
677
678 for (;;) {
679 Object item = s.item;
680 if (item != e) { // matched
681 // assert item != s;
682 s.forgetContents(); // avoid garbage
683 @SuppressWarnings("unchecked") E itemE = (E) item;
684 return itemE;
685 }
686 else if (w.isInterrupted() || (timed && nanos <= 0L)) {
687 // try to cancel and unlink
688 if (s.casItem(e, s.isData ? null : s)) {
689 unsplice(pred, s);
690 return e;
691 }
692 // return normally if lost CAS
693 }
694 else if (spins < 0) { // establish spins at/near front
695 if ((spins = spinsFor(pred, s.isData)) > 0)
696 randomYields = ThreadLocalRandom.current();
697 }
698 else if (spins > 0) { // spin
699 --spins;
700 if (randomYields.nextInt(CHAINED_SPINS) == 0)
701 Thread.yield(); // occasionally yield
702 }
703 else if (s.waiter == null) {
704 s.waiter = w; // request unpark then recheck
705 }
706 else if (timed) {
707 nanos = deadline - System.nanoTime();
708 if (nanos > 0L)
709 LockSupport.parkNanos(this, nanos);
710 }
711 else {
712 LockSupport.park(this);
713 }
714 }
715 }
716
717 /**
718 * Returns spin/yield value for a node with given predecessor and
719 * data mode. See above for explanation.
720 */
721 private static int spinsFor(Node pred, boolean haveData) {
722 if (MP && pred != null) {
723 if (pred.isData != haveData) // phase change
724 return FRONT_SPINS + CHAINED_SPINS;
725 if (pred.isMatched()) // probably at front
726 return FRONT_SPINS;
727 if (pred.waiter == null) // pred apparently spinning
728 return CHAINED_SPINS;
729 }
730 return 0;
731 }
732
733 /* -------------- Traversal methods -------------- */
734
735 /**
736 * Returns the first unmatched data node, or null if none.
737 * Callers must recheck if the returned node is unmatched
738 * before using.
739 */
740 final Node firstDataNode() {
741 restartFromHead: for (;;) {
742 for (Node p = head; p != null;) {
743 Object item = p.item;
744 if (p.isData) {
745 if (item != null)
746 return p;
747 }
748 else if (item == null)
749 break;
750 if (p == (p = p.next))
751 continue restartFromHead;
752 }
753 return null;
754 }
755 }
756
757 /**
758 * Traverses and counts unmatched nodes of the given mode.
759 * Used by methods size and getWaitingConsumerCount.
760 */
761 private int countOfMode(boolean data) {
762 restartFromHead: for (;;) {
763 int count = 0;
764 for (Node p = head; p != null;) {
765 if (!p.isMatched()) {
766 if (p.isData != data)
767 return 0;
768 if (++count == Integer.MAX_VALUE)
769 break; // @see Collection.size()
770 }
771 if (p == (p = p.next))
772 continue restartFromHead;
773 }
774 return count;
775 }
776 }
777
778 public String toString() {
779 String[] a = null;
780 restartFromHead: for (;;) {
781 int charLength = 0;
782 int size = 0;
783 for (Node p = head; p != null;) {
784 Object item = p.item;
785 if (p.isData) {
786 if (item != null) {
787 if (a == null)
788 a = new String[4];
789 else if (size == a.length)
790 a = Arrays.copyOf(a, 2 * size);
791 String s = item.toString();
792 a[size++] = s;
793 charLength += s.length();
794 }
795 } else if (item == null)
796 break;
797 if (p == (p = p.next))
798 continue restartFromHead;
799 }
800
801 if (size == 0)
802 return "[]";
803
804 return Helpers.toString(a, size, charLength);
805 }
806 }
807
808 private Object[] toArrayInternal(Object[] a) {
809 Object[] x = a;
810 restartFromHead: for (;;) {
811 int size = 0;
812 for (Node p = head; p != null;) {
813 Object item = p.item;
814 if (p.isData) {
815 if (item != null) {
816 if (x == null)
817 x = new Object[4];
818 else if (size == x.length)
819 x = Arrays.copyOf(x, 2 * (size + 4));
820 x[size++] = item;
821 }
822 } else if (item == null)
823 break;
824 if (p == (p = p.next))
825 continue restartFromHead;
826 }
827 if (x == null)
828 return new Object[0];
829 else if (a != null && size <= a.length) {
830 if (a != x)
831 System.arraycopy(x, 0, a, 0, size);
832 if (size < a.length)
833 a[size] = null;
834 return a;
835 }
836 return (size == x.length) ? x : Arrays.copyOf(x, size);
837 }
838 }
839
840 /**
841 * Returns an array containing all of the elements in this queue, in
842 * proper sequence.
843 *
844 * <p>The returned array will be "safe" in that no references to it are
845 * maintained by this queue. (In other words, this method must allocate
846 * a new array). The caller is thus free to modify the returned array.
847 *
848 * <p>This method acts as bridge between array-based and collection-based
849 * APIs.
850 *
851 * @return an array containing all of the elements in this queue
852 */
853 public Object[] toArray() {
854 return toArrayInternal(null);
855 }
856
857 /**
858 * Returns an array containing all of the elements in this queue, in
859 * proper sequence; the runtime type of the returned array is that of
860 * the specified array. If the queue fits in the specified array, it
861 * is returned therein. Otherwise, a new array is allocated with the
862 * runtime type of the specified array and the size of this queue.
863 *
864 * <p>If this queue fits in the specified array with room to spare
865 * (i.e., the array has more elements than this queue), the element in
866 * the array immediately following the end of the queue is set to
867 * {@code null}.
868 *
869 * <p>Like the {@link #toArray()} method, this method acts as bridge between
870 * array-based and collection-based APIs. Further, this method allows
871 * precise control over the runtime type of the output array, and may,
872 * under certain circumstances, be used to save allocation costs.
873 *
874 * <p>Suppose {@code x} is a queue known to contain only strings.
875 * The following code can be used to dump the queue into a newly
876 * allocated array of {@code String}:
877 *
878 * <pre> {@code String[] y = x.toArray(new String[0]);}</pre>
879 *
880 * Note that {@code toArray(new Object[0])} is identical in function to
881 * {@code toArray()}.
882 *
883 * @param a the array into which the elements of the queue are to
884 * be stored, if it is big enough; otherwise, a new array of the
885 * same runtime type is allocated for this purpose
886 * @return an array containing all of the elements in this queue
887 * @throws ArrayStoreException if the runtime type of the specified array
888 * is not a supertype of the runtime type of every element in
889 * this queue
890 * @throws NullPointerException if the specified array is null
891 */
892 @SuppressWarnings("unchecked")
893 public <T> T[] toArray(T[] a) {
894 Objects.requireNonNull(a);
895 return (T[]) toArrayInternal(a);
896 }
897
898 final class Itr implements Iterator<E> {
899 private Node nextNode; // next node to return item for
900 private E nextItem; // the corresponding item
901 private Node lastRet; // last returned node, to support remove
902 private Node lastPred; // predecessor to unlink lastRet
903
904 /**
905 * Moves to next node after prev, or first node if prev null.
906 */
907 private void advance(Node prev) {
908 /*
909 * To track and avoid buildup of deleted nodes in the face
910 * of calls to both Queue.remove and Itr.remove, we must
911 * include variants of unsplice and sweep upon each
912 * advance: Upon Itr.remove, we may need to catch up links
913 * from lastPred, and upon other removes, we might need to
914 * skip ahead from stale nodes and unsplice deleted ones
915 * found while advancing.
916 */
917
918 Node r, b; // reset lastPred upon possible deletion of lastRet
919 if ((r = lastRet) != null && !r.isMatched())
920 lastPred = r; // next lastPred is old lastRet
921 else if ((b = lastPred) == null || b.isMatched())
922 lastPred = null; // at start of list
923 else {
924 Node s, n; // help with removal of lastPred.next
925 while ((s = b.next) != null &&
926 s != b && s.isMatched() &&
927 (n = s.next) != null && n != s)
928 b.casNext(s, n);
929 }
930
931 this.lastRet = prev;
932
933 for (Node p = prev, s, n;;) {
934 s = (p == null) ? head : p.next;
935 if (s == null)
936 break;
937 else if (s == p) {
938 p = null;
939 continue;
940 }
941 Object item = s.item;
942 if (s.isData) {
943 if (item != null) {
944 @SuppressWarnings("unchecked") E itemE = (E) item;
945 nextItem = itemE;
946 nextNode = s;
947 return;
948 }
949 }
950 else if (item == null)
951 break;
952 // assert s.isMatched();
953 if (p == null)
954 p = s;
955 else if ((n = s.next) == null)
956 break;
957 else if (s == n)
958 p = null;
959 else
960 p.casNext(s, n);
961 }
962 nextNode = null;
963 nextItem = null;
964 }
965
966 Itr() {
967 advance(null);
968 }
969
970 public final boolean hasNext() {
971 return nextNode != null;
972 }
973
974 public final E next() {
975 final Node p;
976 if ((p = nextNode) == null) throw new NoSuchElementException();
977 E e = nextItem;
978 advance(p);
979 return e;
980 }
981
982 // Default implementation of forEachRemaining is "good enough".
983
984 public final void remove() {
985 final Node lastRet = this.lastRet;
986 if (lastRet == null)
987 throw new IllegalStateException();
988 this.lastRet = null;
989 if (lastRet.tryMatchData())
990 unsplice(lastPred, lastRet);
991 }
992 }
993
994 /** A customized variant of Spliterators.IteratorSpliterator */
995 final class LTQSpliterator implements Spliterator<E> {
 996 static final int MAX_BATCH = 1 << 25; // max batch array size
997 Node current; // current node; null until initialized
998 int batch; // batch size for splits
999 boolean exhausted; // true when no more nodes
1000 LTQSpliterator() {}
1001
1002 public Spliterator<E> trySplit() {
1003 Node p, q;
1004 if ((p = current()) == null || (q = p.next) == null)
1005 return null;
1006 int i = 0, n = batch = Math.min(batch + 1, MAX_BATCH);
1007 Object[] a = null;
1008 do {
1009 final Object item = p.item;
1010 if (p.isData) {
1011 if (item != null)
1012 ((a != null) ? a : (a = new Object[n]))[i++] = item;
1013 } else if (item == null) {
1014 p = null;
1015 break;
1016 }
1017 if (p == (p = q))
1018 p = firstDataNode();
1019 } while (p != null && (q = p.next) != null && i < n);
1020 setCurrent(p);
1021 return (i == 0) ? null :
1022 Spliterators.spliterator(a, 0, i, (Spliterator.ORDERED |
1023 Spliterator.NONNULL |
1024 Spliterator.CONCURRENT));
1025 }
1026
1027 public void forEachRemaining(Consumer<? super E> action) {
1028 Objects.requireNonNull(action);
1029 final Node p;
1030 if ((p = current()) != null) {
1031 current = null;
1032 exhausted = true;
1033 forEachFrom(action, p);
1034 }
1035 }
1036
1037 @SuppressWarnings("unchecked")
1038 public boolean tryAdvance(Consumer<? super E> action) {
1039 Objects.requireNonNull(action);
1040 Node p;
1041 if ((p = current()) != null) {
1042 E e = null;
1043 do {
1044 final Object item = p.item;
1045 final boolean isData = p.isData;
1046 if (p == (p = p.next))
1047 p = head;
1048 if (isData) {
1049 if (item != null) {
1050 e = (E) item;
1051 break;
1052 }
1053 }
1054 else if (item == null)
1055 p = null;
1056 } while (p != null);
1057 setCurrent(p);
1058 if (e != null) {
1059 action.accept(e);
1060 return true;
1061 }
1062 }
1063 return false;
1064 }
1065
1066 private void setCurrent(Node p) {
1067 if ((current = p) == null)
1068 exhausted = true;
1069 }
1070
1071 private Node current() {
1072 Node p;
1073 if ((p = current) == null && !exhausted)
1074 setCurrent(p = firstDataNode());
1075 return p;
1076 }
1077
1078 public long estimateSize() { return Long.MAX_VALUE; }
1079
1080 public int characteristics() {
1081 return (Spliterator.ORDERED |
1082 Spliterator.NONNULL |
1083 Spliterator.CONCURRENT);
1084 }
1085 }
1086
1087 /**
1088 * Returns a {@link Spliterator} over the elements in this queue.
1089 *
1090 * <p>The returned spliterator is
1091 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1092 *
1093 * <p>The {@code Spliterator} reports {@link Spliterator#CONCURRENT},
1094 * {@link Spliterator#ORDERED}, and {@link Spliterator#NONNULL}.
1095 *
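 * <p>For example, a stream over the queue can be obtained in the
 * usual way (an illustrative sketch):
 *
 * <pre> {@code long n = queue.stream().count();}</pre>
 *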
1096 * @implNote
1097 * The {@code Spliterator} implements {@code trySplit} to permit limited
1098 * parallelism.
1099 *
1100 * @return a {@code Spliterator} over the elements in this queue
1101 * @since 1.8
1102 */
1103 public Spliterator<E> spliterator() {
1104 return new LTQSpliterator();
1105 }
1106
1107 /* -------------- Removal methods -------------- */
1108
1109 /**
1110 * Unsplices (now or later) the given deleted/cancelled node with
1111 * the given predecessor.
1112 *
1113 * @param pred a node that was at one time known to be the
1114 * predecessor of s, or null or s itself if s is/was at head
1115 * @param s the node to be unspliced
1116 */
1117 final void unsplice(Node pred, Node s) {
1118 s.waiter = null; // disable signals
1119 /*
1120 * See above for rationale. Briefly: if pred still points to
 1121 * s, try to unlink s. If s cannot be unlinked, because it is
 1122 * the trailing node or because pred might be unlinked, and
 1123 * neither pred nor s is head or offlist, add to sweepVotes;
 1124 * if enough votes have accumulated, sweep.
1125 */
1126 if (pred != null && pred != s && pred.next == s) {
1127 Node n = s.next;
1128 if (n == null ||
1129 (n != s && pred.casNext(s, n) && pred.isMatched())) {
1130 for (;;) { // check if at, or could be, head
1131 Node h = head;
1132 if (h == pred || h == s || h == null)
1133 return; // at head or list empty
1134 if (!h.isMatched())
1135 break;
1136 Node hn = h.next;
1137 if (hn == null)
1138 return; // now empty
1139 if (hn != h && casHead(h, hn))
1140 h.forgetNext(); // advance head
1141 }
1142 if (pred.next != pred && s.next != s) { // recheck if offlist
1143 for (;;) { // sweep now if enough votes
1144 int v = sweepVotes;
1145 if (v < SWEEP_THRESHOLD) {
1146 if (casSweepVotes(v, v + 1))
1147 break;
1148 }
1149 else if (casSweepVotes(v, 0)) {
1150 sweep();
1151 break;
1152 }
1153 }
1154 }
1155 }
1156 }
1157 }
1158
1159 /**
1160 * Unlinks matched (typically cancelled) nodes encountered in a
1161 * traversal from head.
1162 */
1163 private void sweep() {
1164 for (Node p = head, s, n; p != null && (s = p.next) != null; ) {
1165 if (!s.isMatched())
1166 // Unmatched nodes are never self-linked
1167 p = s;
1168 else if ((n = s.next) == null) // trailing node is pinned
1169 break;
1170 else if (s == n) // stale
1171 // No need to also check for p == s, since that implies s == n
1172 p = head;
1173 else
1174 p.casNext(s, n);
1175 }
1176 }
1177
1178 /**
1179 * Creates an initially empty {@code LinkedTransferQueue}.
1180 */
1181 public LinkedTransferQueue() {
1182 }
1183
1184 /**
1185 * Creates a {@code LinkedTransferQueue}
1186 * initially containing the elements of the given collection,
1187 * added in traversal order of the collection's iterator.
1188 *
1189 * @param c the collection of elements to initially contain
1190 * @throws NullPointerException if the specified collection or any
1191 * of its elements are null
1192 */
1193 public LinkedTransferQueue(Collection<? extends E> c) {
1194 this();
1195 addAll(c);
1196 }
1197
1198 /**
1199 * Inserts the specified element at the tail of this queue.
1200 * As the queue is unbounded, this method will never block.
1201 *
1202 * @throws NullPointerException if the specified element is null
1203 */
1204 public void put(E e) {
1205 xfer(e, true, ASYNC, 0);
1206 }
1207
1208 /**
1209 * Inserts the specified element at the tail of this queue.
1210 * As the queue is unbounded, this method will never block or
1211 * return {@code false}.
1212 *
1213 * @return {@code true} (as specified by
1214 * {@link java.util.concurrent.BlockingQueue#offer(Object,long,TimeUnit)
1215 * BlockingQueue.offer})
1216 * @throws NullPointerException if the specified element is null
1217 */
1218 public boolean offer(E e, long timeout, TimeUnit unit) {
1219 xfer(e, true, ASYNC, 0);
1220 return true;
1221 }
1222
1223 /**
1224 * Inserts the specified element at the tail of this queue.
1225 * As the queue is unbounded, this method will never return {@code false}.
1226 *
1227 * @return {@code true} (as specified by {@link Queue#offer})
1228 * @throws NullPointerException if the specified element is null
1229 */
1230 public boolean offer(E e) {
1231 xfer(e, true, ASYNC, 0);
1232 return true;
1233 }
1234
1235 /**
1236 * Inserts the specified element at the tail of this queue.
1237 * As the queue is unbounded, this method will never throw
1238 * {@link IllegalStateException} or return {@code false}.
1239 *
1240 * @return {@code true} (as specified by {@link Collection#add})
1241 * @throws NullPointerException if the specified element is null
1242 */
1243 public boolean add(E e) {
1244 xfer(e, true, ASYNC, 0);
1245 return true;
1246 }
1247
1248 /**
1249 * Transfers the element to a waiting consumer immediately, if possible.
1250 *
1251 * <p>More precisely, transfers the specified element immediately
1252 * if there exists a consumer already waiting to receive it (in
1253 * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
1254 * otherwise returning {@code false} without enqueuing the element.
1255 *
1256 * @throws NullPointerException if the specified element is null
1257 */
1258 public boolean tryTransfer(E e) {
1259 return xfer(e, true, NOW, 0) == null;
1260 }
1261
1262 /**
1263 * Transfers the element to a consumer, waiting if necessary to do so.
1264 *
1265 * <p>More precisely, transfers the specified element immediately
1266 * if there exists a consumer already waiting to receive it (in
1267 * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
1268 * else inserts the specified element at the tail of this queue
1269 * and waits until the element is received by a consumer.
1270 *
1271 * @throws NullPointerException if the specified element is null
1272 */
1273 public void transfer(E e) throws InterruptedException {
1274 if (xfer(e, true, SYNC, 0) != null) {
1275 Thread.interrupted(); // failure possible only due to interrupt
1276 throw new InterruptedException();
1277 }
1278 }
1279
1280 /**
1281 * Transfers the element to a consumer if it is possible to do so
1282 * before the timeout elapses.
1283 *
1284 * <p>More precisely, transfers the specified element immediately
1285 * if there exists a consumer already waiting to receive it (in
1286 * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
1287 * else inserts the specified element at the tail of this queue
1288 * and waits until the element is received by a consumer,
1289 * returning {@code false} if the specified wait time elapses
1290 * before the element can be transferred.
1291 *
1292 * @throws NullPointerException if the specified element is null
1293 */
1294 public boolean tryTransfer(E e, long timeout, TimeUnit unit)
1295 throws InterruptedException {
1296 if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null)
1297 return true;
1298 if (!Thread.interrupted())
1299 return false;
1300 throw new InterruptedException();
1301 }
1302
1303 public E take() throws InterruptedException {
1304 E e = xfer(null, false, SYNC, 0);
1305 if (e != null)
1306 return e;
1307 Thread.interrupted();
1308 throw new InterruptedException();
1309 }
1310
1311 public E poll(long timeout, TimeUnit unit) throws InterruptedException {
1312 E e = xfer(null, false, TIMED, unit.toNanos(timeout));
1313 if (e != null || !Thread.interrupted())
1314 return e;
1315 throw new InterruptedException();
1316 }
1317
1318 public E poll() {
1319 return xfer(null, false, NOW, 0);
1320 }
1321
1322 /**
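 * Removes all available elements from this queue and adds them
 * to the given collection, as in this illustrative sketch:
 *
 * <pre> {@code
 * List<E> drained = new ArrayList<>();
 * int n = queue.drainTo(drained);}</pre>
 *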
1323 * @throws NullPointerException {@inheritDoc}
1324 * @throws IllegalArgumentException {@inheritDoc}
1325 */
1326 public int drainTo(Collection<? super E> c) {
1327 Objects.requireNonNull(c);
1328 if (c == this)
1329 throw new IllegalArgumentException();
1330 int n = 0;
1331 for (E e; (e = poll()) != null; n++)
1332 c.add(e);
1333 return n;
1334 }
1335
1336 /**
1337 * @throws NullPointerException {@inheritDoc}
1338 * @throws IllegalArgumentException {@inheritDoc}
1339 */
1340 public int drainTo(Collection<? super E> c, int maxElements) {
1341 Objects.requireNonNull(c);
1342 if (c == this)
1343 throw new IllegalArgumentException();
1344 int n = 0;
1345 for (E e; n < maxElements && (e = poll()) != null; n++)
1346 c.add(e);
1347 return n;
1348 }
1349
1350 /**
1351 * Returns an iterator over the elements in this queue in proper sequence.
1352 * The elements will be returned in order from first (head) to last (tail).
1353 *
1354 * <p>The returned iterator is
1355 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1356 *
1357 * @return an iterator over the elements in this queue in proper sequence
1358 */
1359 public Iterator<E> iterator() {
1360 return new Itr();
1361 }
1362
1363 public E peek() {
1364 restartFromHead: for (;;) {
1365 for (Node p = head; p != null;) {
1366 Object item = p.item;
1367 if (p.isData) {
1368 if (item != null) {
1369 @SuppressWarnings("unchecked") E e = (E) item;
1370 return e;
1371 }
1372 }
1373 else if (item == null)
1374 break;
1375 if (p == (p = p.next))
1376 continue restartFromHead;
1377 }
1378 return null;
1379 }
1380 }
1381
1382 /**
1383 * Returns {@code true} if this queue contains no elements.
1384 *
1385 * @return {@code true} if this queue contains no elements
1386 */
1387 public boolean isEmpty() {
1388 return firstDataNode() == null;
1389 }
1390
1391 public boolean hasWaitingConsumer() {
1392 restartFromHead: for (;;) {
1393 for (Node p = head; p != null;) {
1394 Object item = p.item;
1395 if (p.isData) {
1396 if (item != null)
1397 break;
1398 }
1399 else if (item == null)
1400 return true;
1401 if (p == (p = p.next))
1402 continue restartFromHead;
1403 }
1404 return false;
1405 }
1406 }
1407
1408 /**
1409 * Returns the number of elements in this queue. If this queue
1410 * contains more than {@code Integer.MAX_VALUE} elements, returns
1411 * {@code Integer.MAX_VALUE}.
1412 *
1413 * <p>Beware that, unlike in most collections, this method is
1414 * <em>NOT</em> a constant-time operation. Because of the
1415 * asynchronous nature of these queues, determining the current
1416 * number of elements requires an O(n) traversal.
1417 *
1418 * @return the number of elements in this queue
1419 */
1420 public int size() {
1421 return countOfMode(true);
1422 }
1423
1424 public int getWaitingConsumerCount() {
1425 return countOfMode(false);
1426 }
1427
1428 /**
1429 * Removes a single instance of the specified element from this queue,
1430 * if it is present. More formally, removes an element {@code e} such
1431 * that {@code o.equals(e)}, if this queue contains one or more such
1432 * elements.
1433 * Returns {@code true} if this queue contained the specified element
1434 * (or equivalently, if this queue changed as a result of the call).
1435 *
1436 * @param o element to be removed from this queue, if present
1437 * @return {@code true} if this queue changed as a result of the call
1438 */
1439 public boolean remove(Object o) {
1440 if (o == null)
1441 return false;
1442 restartFromHead: for (;;) {
1443 for (Node p = head, c = p, pred = null, q; p != null; ) {
1444 final Object item; boolean pAlive;
1445 if (pAlive = ((item = p.item) != null && p.isData)) {
1446 if (o.equals(item) && p.tryMatchData()) {
1447 if ((q = p.next) == null) q = p;
1448 if (c != q) tryCasSuccessor(pred, c, q);
1449 return true;
1450 }
1451 }
1452 else if (!p.isData && item == null)
1453 break;
1454 if (c != p && tryCasSuccessor(pred, c, p))
1455 c = p;
1456 q = p.next;
1457 if (pAlive || c != p) {
1458 pred = p;
1459 p = c = q;
1460 }
1461 else if (p == (p = q))
1462 continue restartFromHead;
1463 }
1464 return false;
1465 }
1466 }
1467
1468 /**
1469 * Returns {@code true} if this queue contains the specified element.
1470 * More formally, returns {@code true} if and only if this queue contains
1471 * at least one element {@code e} such that {@code o.equals(e)}.
1472 *
1473 * @param o object to be checked for containment in this queue
1474 * @return {@code true} if this queue contains the specified element
1475 */
1476 public boolean contains(Object o) {
1477 if (o == null)
1478 return false;
1479 restartFromHead: for (;;) {
1480 for (Node p = head, c = p, pred = null, q; p != null; ) {
1481 final Object item; final boolean pAlive;
1482 if (pAlive = ((item = p.item) != null && p.isData)) {
1483 if (o.equals(item))
1484 return true;
1485 }
1486 else if (!p.isData && item == null)
1487 break;
1488 if (c != p && tryCasSuccessor(pred, c, p))
1489 c = p;
1490 q = p.next;
1491 if (pAlive || c != p) {
1492 pred = p;
1493 p = c = q;
1494 }
1495 else if (p == (p = q))
1496 continue restartFromHead;
1497 }
1498 return false;
1499 }
1500 }
1501
1502 /**
1503 * Always returns {@code Integer.MAX_VALUE} because a
1504 * {@code LinkedTransferQueue} is not capacity constrained.
1505 *
1506 * @return {@code Integer.MAX_VALUE} (as specified by
1507 * {@link java.util.concurrent.BlockingQueue#remainingCapacity()
1508 * BlockingQueue.remainingCapacity})
1509 */
1510 public int remainingCapacity() {
1511 return Integer.MAX_VALUE;
1512 }
1513
1514 /**
1515 * Saves this queue to a stream (that is, serializes it).
1516 *
1517 * @param s the stream
1518 * @throws java.io.IOException if an I/O error occurs
1519 * @serialData All of the elements (each an {@code E}) in
1520 * the proper order, followed by a null
1521 */
1522 private void writeObject(java.io.ObjectOutputStream s)
1523 throws java.io.IOException {
1524 s.defaultWriteObject();
1525 for (E e : this)
1526 s.writeObject(e);
1527 // Use trailing null as sentinel
1528 s.writeObject(null);
1529 }
1530
1531 /**
1532 * Reconstitutes this queue from a stream (that is, deserializes it).
1533 * @param s the stream
1534 * @throws ClassNotFoundException if the class of a serialized object
1535 * could not be found
1536 * @throws java.io.IOException if an I/O error occurs
1537 */
1538 private void readObject(java.io.ObjectInputStream s)
1539 throws java.io.IOException, ClassNotFoundException {
1540 s.defaultReadObject();
1541 for (;;) {
1542 @SuppressWarnings("unchecked")
1543 E item = (E) s.readObject();
1544 if (item == null)
1545 break;
1546 else
1547 offer(item);
1548 }
1549 }
1550
1551 /**
1552 * @throws NullPointerException {@inheritDoc}
1553 */
1554 public boolean removeIf(Predicate<? super E> filter) {
1555 Objects.requireNonNull(filter);
1556 return bulkRemove(filter);
1557 }
1558
1559 /**
1560 * @throws NullPointerException {@inheritDoc}
1561 */
1562 public boolean removeAll(Collection<?> c) {
1563 Objects.requireNonNull(c);
1564 return bulkRemove(e -> c.contains(e));
1565 }
1566
1567 /**
1568 * @throws NullPointerException {@inheritDoc}
1569 */
1570 public boolean retainAll(Collection<?> c) {
1571 Objects.requireNonNull(c);
1572 return bulkRemove(e -> !c.contains(e));
1573 }
1574
1575 public void clear() {
1576 bulkRemove(e -> true);
1577 }
1578
1579 /**
1580 * Tolerate this many consecutive dead nodes before CAS-collapsing.
 1581 * Amortized cost of clear() is (1 + 1/MAX_HOPS) = 1.125 CASes per element.
1582 */
1583 private static final int MAX_HOPS = 8;
1584
1585 /** Implementation of bulk remove methods. */
1586 @SuppressWarnings("unchecked")
1587 private boolean bulkRemove(Predicate<? super E> filter) {
1588 boolean removed = false;
1589 restartFromHead: for (;;) {
1590 int hops = MAX_HOPS;
1591 // c will be CASed to collapse intervening dead nodes between
1592 // pred (or head if null) and p.
1593 for (Node p = head, c = p, pred = null, q; p != null; p = q) {
1594 final Object item; boolean pAlive;
1595 if (pAlive = ((item = p.item) != null && p.isData)) {
1596 if (filter.test((E) item)) {
1597 if (p.tryMatchData())
1598 removed = true;
1599 pAlive = false;
1600 }
1601 }
1602 else if (!p.isData && item == null)
1603 break;
1604 if ((q = p.next) == null || pAlive || --hops == 0) {
1605 // p might already be self-linked here, but if so:
1606 // - CASing head will surely fail
1607 // - CASing pred's next will be useless but harmless.
1608 if (c != p && tryCasSuccessor(pred, c, p))
1609 c = p;
1610 // if c != p, CAS failed, so abandon old pred
1611 if (pAlive || c != p) {
1612 hops = MAX_HOPS;
1613 pred = p;
1614 c = q;
1615 }
1616 } else if (p == q)
1617 continue restartFromHead;
1618 }
1619 return removed;
1620 }
1621 }
1622
1623 /**
1624 * Runs action on each element found during a traversal starting at p.
1625 * If p is null, the action is not run.
1626 */
1627 @SuppressWarnings("unchecked")
1628 void forEachFrom(Consumer<? super E> action, Node p) {
1629 for (Node c = p, pred = null, q; p != null; ) {
1630 final Object item; final boolean pAlive;
1631 if (pAlive = ((item = p.item) != null && p.isData))
1632 action.accept((E) item);
1633 else if (!p.isData && item == null)
1634 break;
1635 if (c != p && tryCasSuccessor(pred, c, p))
1636 c = p;
1637 q = p.next;
1638 if (pAlive || c != p) {
1639 pred = p;
1640 p = c = q;
1641 }
1642 else if (p == (p = q)) {
1643 pred = null;
1644 c = p = head;
1645 }
1646 }
1647 }
1648
1649 /**
1650 * @throws NullPointerException {@inheritDoc}
1651 */
1652 public void forEach(Consumer<? super E> action) {
1653 Objects.requireNonNull(action);
1654 forEachFrom(action, head);
1655 }
1656
1657 // VarHandle mechanics
1658 private static final VarHandle HEAD;
1659 private static final VarHandle TAIL;
1660 private static final VarHandle SWEEPVOTES;
1661 static {
1662 try {
1663 MethodHandles.Lookup l = MethodHandles.lookup();
1664 HEAD = l.findVarHandle(LinkedTransferQueue.class, "head",
1665 Node.class);
1666 TAIL = l.findVarHandle(LinkedTransferQueue.class, "tail",
1667 Node.class);
1668 SWEEPVOTES = l.findVarHandle(LinkedTransferQueue.class, "sweepVotes",
1669 int.class);
1670 } catch (ReflectiveOperationException e) {
1671 throw new Error(e);
1672 }
1673
1674 // Reduce the risk of rare disastrous classloading in first call to
1675 // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
1676 Class<?> ensureLoaded = LockSupport.class;
1677 }
1678 }