root/jsr166/jsr166/src/main/java/util/concurrent/LinkedTransferQueue.java
Revision: 1.121
Committed: Wed Dec 28 18:07:18 2016 UTC by jsr166
Branch: MAIN
Changes since 1.120: +5 -4 lines
Log Message:
using release write in forgetNext seems slightly safer

1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/publicdomain/zero/1.0/
5 */
6
7 package java.util.concurrent;
8
9 import java.lang.invoke.MethodHandles;
10 import java.lang.invoke.VarHandle;
11 import java.util.AbstractQueue;
12 import java.util.Arrays;
13 import java.util.Collection;
14 import java.util.Iterator;
15 import java.util.NoSuchElementException;
16 import java.util.Objects;
17 import java.util.Queue;
18 import java.util.Spliterator;
19 import java.util.Spliterators;
20 import java.util.concurrent.locks.LockSupport;
21 import java.util.function.Consumer;
22 import java.util.function.Predicate;
23
24 /**
25 * An unbounded {@link TransferQueue} based on linked nodes.
26 * This queue orders elements FIFO (first-in-first-out) with respect
27 * to any given producer. The <em>head</em> of the queue is that
28 * element that has been on the queue the longest time for some
29 * producer. The <em>tail</em> of the queue is that element that has
30 * been on the queue the shortest time for some producer.
31 *
32 * <p>Beware that, unlike in most collections, the {@code size} method
33 * is <em>NOT</em> a constant-time operation. Because of the
34 * asynchronous nature of these queues, determining the current number
35 * of elements requires a traversal of the elements, and so may report
36 * inaccurate results if this collection is modified during traversal.
37 * Additionally, the bulk operations {@code addAll},
38 * {@code removeAll}, {@code retainAll}, {@code containsAll},
39 * and {@code toArray} are <em>not</em> guaranteed
40 * to be performed atomically. For example, an iterator operating
41 * concurrently with an {@code addAll} operation might view only some
42 * of the added elements.
43 *
44 * <p>This class and its iterator implement all of the
45 * <em>optional</em> methods of the {@link Collection} and {@link
46 * Iterator} interfaces.
47 *
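 * <p>Usage sketch (illustrative only; the {@code handOff} method and
 * variable names below are hypothetical, not part of this class): a
 * producer can hand an element directly to a consumer, blocking in
 * {@code transfer} until the element has been received, whereas
 * {@code offer} would enqueue it and return immediately.
 *
 * <pre> {@code
 * void handOff(LinkedTransferQueue<String> queue) throws InterruptedException {
 *   // Consumer: blocks in take() until an element is available.
 *   Thread consumer = new Thread(() -> {
 *     try {
 *       System.out.println("received " + queue.take());
 *     } catch (InterruptedException e) {
 *       Thread.currentThread().interrupt();
 *     }
 *   });
 *   consumer.start();
 *   // Producer: transfer() returns only after the consumer has
 *   // received the element.
 *   queue.transfer("hello");
 * }}</pre>
 *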
48 * <p>Memory consistency effects: As with other concurrent
49 * collections, actions in a thread prior to placing an object into a
50 * {@code LinkedTransferQueue}
51 * <a href="package-summary.html#MemoryVisibility"><i>happen-before</i></a>
52 * actions subsequent to the access or removal of that element from
53 * the {@code LinkedTransferQueue} in another thread.
54 *
55 * <p>This class is a member of the
56 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
57 * Java Collections Framework</a>.
58 *
59 * @since 1.7
60 * @author Doug Lea
61 * @param <E> the type of elements held in this queue
62 */
63 public class LinkedTransferQueue<E> extends AbstractQueue<E>
64 implements TransferQueue<E>, java.io.Serializable {
65 private static final long serialVersionUID = -3223113410248163686L;
66
67 /*
68 * *** Overview of Dual Queues with Slack ***
69 *
70 * Dual Queues, introduced by Scherer and Scott
71 * (http://www.cs.rochester.edu/~scott/papers/2004_DISC_dual_DS.pdf)
72 * are (linked) queues in which nodes may represent either data or
73 * requests. When a thread tries to enqueue a data node, but
74 * encounters a request node, it instead "matches" and removes it;
75 * and vice versa for enqueuing requests. Blocking Dual Queues
76 * arrange that threads enqueuing unmatched requests block until
77 * other threads provide the match. Dual Synchronous Queues (see
78 * Scherer, Lea, & Scott
79 * http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf)
80 * additionally arrange that threads enqueuing unmatched data also
81 * block. Dual Transfer Queues support all of these modes, as
82 * dictated by callers.
83 *
84 * A FIFO dual queue may be implemented using a variation of the
85 * Michael & Scott (M&S) lock-free queue algorithm
86 * (http://www.cs.rochester.edu/~scott/papers/1996_PODC_queues.pdf).
87 * It maintains two pointer fields, "head", pointing to a
88 * (matched) node that in turn points to the first actual
89 * (unmatched) queue node (or null if empty); and "tail" that
90 * points to the last node on the queue (or again null if
91 * empty). For example, here is a possible queue with four data
92 * elements:
93 *
94 * head tail
95 * | |
96 * v v
97 * M -> U -> U -> U -> U
98 *
99 * The M&S queue algorithm is known to be prone to scalability and
100 * overhead limitations when maintaining (via CAS) these head and
101 * tail pointers. This has led to the development of
102 * contention-reducing variants such as elimination arrays (see
103 * Moir et al http://portal.acm.org/citation.cfm?id=1074013) and
104 * optimistic back pointers (see Ladan-Mozes & Shavit
105 * http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf).
106 * However, the nature of dual queues enables a simpler tactic for
107 * improving M&S-style implementations when dual-ness is needed.
108 *
109 * In a dual queue, each node must atomically maintain its match
110 * status. While there are other possible variants, we implement
111 * this here as: for a data-mode node, matching entails CASing an
112 * "item" field from a non-null data value to null upon match, and
113 * vice-versa for request nodes, CASing from null to a data
114 * value. (Note that the linearization properties of this style of
115 * queue are easy to verify -- elements are made available by
116 * linking, and unavailable by matching.) Compared to plain M&S
117 * queues, this property of dual queues requires one additional
118 * successful atomic operation per enq/deq pair. But it also
119 * enables lower cost variants of queue maintenance mechanics. (A
120 * variation of this idea applies even for non-dual queues that
121 * support deletion of interior elements, such as
122 * j.u.c.ConcurrentLinkedQueue.)
123 *
124 * Once a node is matched, its match status can never again
125 * change. We may thus arrange that the linked list of them
126 * contain a prefix of zero or more matched nodes, followed by a
127 * suffix of zero or more unmatched nodes. (Note that we allow
128 * both the prefix and suffix to be zero length, which in turn
129 * means that we do not use a dummy header.) If we were not
130 * concerned with either time or space efficiency, we could
131 * correctly perform enqueue and dequeue operations by traversing
132 * from a pointer to the initial node; CASing the item of the
133 * first unmatched node on match and CASing the next field of the
134 * trailing node on appends. (Plus some special-casing when
135 * initially empty). While this would be a terrible idea in
136 * itself, it does have the benefit of not requiring ANY atomic
137 * updates on head/tail fields.
138 *
139 * We introduce here an approach that lies between the extremes of
140 * never versus always updating queue (head and tail) pointers.
141 * This offers a tradeoff between sometimes requiring extra
142 * traversal steps to locate the first and/or last unmatched
143 * nodes, versus the reduced overhead and contention of fewer
144 * updates to queue pointers. For example, a possible snapshot of
145 * a queue is:
146 *
147 * head tail
148 * | |
149 * v v
150 * M -> M -> U -> U -> U -> U
151 *
152 * The best value for this "slack" (the targeted maximum distance
153 * between the value of "head" and the first unmatched node, and
154 * similarly for "tail") is an empirical matter. We have found
155 * that using very small constants in the range of 1-3 works best
156 * over a range of platforms. Larger values introduce increasing
157 * costs of cache misses and risks of long traversal chains, while
158 * smaller values increase CAS contention and overhead.
159 *
160 * Dual queues with slack differ from plain M&S dual queues by
161 * virtue of only sometimes updating head or tail pointers when
162 * matching, appending, or even traversing nodes, in order to
163 * maintain a targeted slack. The idea of "sometimes" may be
164 * operationalized in several ways. The simplest is to use a
165 * per-operation counter incremented on each traversal step, and
166 * to try (via CAS) to update the associated queue pointer
167 * whenever the count exceeds a threshold. Another, that requires
168 * more overhead, is to use random number generators to update
169 * with a given probability per traversal step.
170 *
171 * In any strategy along these lines, because CASes updating
172 * fields may fail, the actual slack may exceed targeted
173 * slack. However, they may be retried at any time to maintain
174 * targets. Even when using very small slack values, this
175 * approach works well for dual queues because it allows all
176 * operations up to the point of matching or appending an item
177 * (hence potentially allowing progress by another thread) to be
178 * read-only, thus not introducing any further contention. As
179 * described below, we implement this by performing slack
180 * maintenance retries only after these points.
181 *
182 * As an accompaniment to such techniques, traversal overhead can
183 * be further reduced without increasing contention of head
184 * pointer updates: Threads may sometimes shortcut the "next" link
185 * path from the current "head" node to be closer to the currently
186 * known first unmatched node, and similarly for tail. Again, this
187 * may be triggered using thresholds or randomization.
188 *
189 * These ideas must be further extended to avoid unbounded amounts
190 * of costly-to-reclaim garbage caused by the sequential "next"
191 * links of nodes starting at old forgotten head nodes: As first
192 * described in detail by Boehm
193 * (http://portal.acm.org/citation.cfm?doid=503272.503282), if a GC
194 * delays noticing that any arbitrarily old node has become
195 * garbage, all newer dead nodes will also be unreclaimed.
196 * (Similar issues arise in non-GC environments.) To cope with
197 * this in our implementation, upon CASing to advance the head
198 * pointer, we set the "next" link of the previous head to point
199 * only to itself; thus limiting the length of connected dead lists.
200 * (We also take similar care to wipe out possibly garbage
201 * retaining values held in other Node fields.) However, doing so
202 * adds some further complexity to traversal: If any "next"
203 * pointer links to itself, it indicates that the current thread
204 * has lagged behind a head-update, and so the traversal must
205 * continue from the "head". Traversals trying to find the
206 * current tail starting from "tail" may also encounter
207 * self-links, in which case they also continue at "head".
208 *
209 * It is tempting in a slack-based scheme to not even use CAS for
210 * updates (similarly to Ladan-Mozes & Shavit). However, this
211 * cannot be done for head updates under the above link-forgetting
212 * mechanics because an update may leave head at a detached node.
213 * And while direct writes are possible for tail updates, they
214 * increase the risk of long retraversals, and hence long garbage
215 * chains, which can be much more costly than is worthwhile
216 * considering that the cost difference of performing a CAS vs
217 * write is smaller when they are not triggered on each operation
218 * (especially considering that writes and CASes equally require
219 * additional GC bookkeeping ("write barriers") that are sometimes
220 * more costly than the writes themselves because of contention).
221 *
222 * *** Overview of implementation ***
223 *
224 * We use a threshold-based approach to updates, with a slack
225 * threshold of two -- that is, we update head/tail when the
226 * current pointer appears to be two or more steps away from the
227 * first/last node. The slack value is hard-wired: a path greater
228 * than one is naturally implemented by checking equality of
229 * traversal pointers except when the list has only one element,
230 * in which case we keep slack threshold at one. Avoiding tracking
231 * explicit counts across method calls slightly simplifies an
232 * already-messy implementation. Using randomization would
233 * probably work better if there were a low-quality dirt-cheap
234 * per-thread one available, but even ThreadLocalRandom is too
235 * heavy for these purposes.
236 *
237 * With such a small slack threshold value, it is not worthwhile
238 * to augment this with path short-circuiting (i.e., unsplicing
239 * interior nodes) except in the case of cancellation/removal (see
240 * below).
241 *
242 * We allow both the head and tail fields to be null before any
243 * nodes are enqueued; initializing upon first append. This
244 * simplifies some other logic, as well as providing more
245 * efficient explicit control paths instead of letting JVMs insert
246 * implicit NullPointerExceptions when they are null. While not
247 * currently fully implemented, we also leave open the possibility
248 * of re-nulling these fields when empty (which is complicated to
249 * arrange, for little benefit.)
250 *
251 * All enqueue/dequeue operations are handled by the single method
252 * "xfer" with parameters indicating whether to act as some form
253 * of offer, put, poll, take, or transfer (each possibly with
254 * timeout). The relative complexity of using one monolithic
255 * method is outweighed by the code bulk and maintenance problems
256 * of using separate methods for each case.
257 *
258 * Operation consists of up to three phases. The first is
259 * implemented within method xfer, the second in tryAppend, and
260 * the third in method awaitMatch.
261 *
262 * 1. Try to match an existing node
263 *
264 * Starting at head, skip already-matched nodes until finding
265 * an unmatched node of opposite mode, if one exists; in that
266 * case, match it and return, if necessary also updating head
267 * to one past the matched node (or to the node itself if the
268 * list has no other unmatched nodes). If the CAS misses, then
269 * a loop retries advancing head by two steps until either
270 * success or the slack is at most two. By requiring that each
271 * attempt advances head by two (if applicable), we ensure that
272 * the slack does not grow without bound. Traversals also check
273 * if the initial head is now off-list, in which case they
274 * start at the new head.
275 *
276 * If no candidates are found and the call was untimed
277 * poll/offer (argument "how" is NOW), return.
278 *
279 * 2. Try to append a new node (method tryAppend)
280 *
281 * Starting at current tail pointer, find the actual last node
282 * and try to append a new node (or if head was null, establish
283 * the first node). Nodes can be appended only if their
284 * predecessors are either already matched or are of the same
285 * mode. If we detect otherwise, then a new node with opposite
286 * mode must have been appended during traversal, so we must
287 * restart at phase 1. The traversal and update steps are
288 * otherwise similar to phase 1: Retrying upon CAS misses and
289 * checking for staleness. In particular, if a self-link is
290 * encountered, then we can safely jump to a node on the list
291 * by continuing the traversal at current head.
292 *
293 * On successful append, if the call was ASYNC, return.
294 *
295 * 3. Await match or cancellation (method awaitMatch)
296 *
297 * Wait for another thread to match the node, cancelling instead if
298 * the current thread was interrupted or the wait timed out. On
299 * multiprocessors, we use front-of-queue spinning: If a node
300 * appears to be the first unmatched node in the queue, it
301 * spins a bit before blocking. In either case, before blocking
302 * it tries to unsplice any nodes between the current "head"
303 * and the first unmatched node.
304 *
305 * Front-of-queue spinning vastly improves performance of
306 * heavily contended queues. And so long as it is relatively
307 * brief and "quiet", spinning does not much impact performance
308 * of less-contended queues. During spins threads check their
309 * interrupt status and generate a thread-local random number
310 * to decide to occasionally perform a Thread.yield. While
311 * yield has underdefined specs, we assume that it might help,
312 * and will not hurt, in limiting impact of spinning on busy
313 * systems. We also use smaller (1/2) spins for nodes that are
314 * not known to be front but whose predecessors have not
315 * blocked -- these "chained" spins avoid artifacts of
316 * front-of-queue rules which otherwise lead to alternating
317 * nodes spinning vs blocking. Further, front threads that
318 * represent phase changes (from data to request node or vice
319 * versa) compared to their predecessors receive additional
320 * chained spins, reflecting longer paths typically required to
321 * unblock threads during phase changes.
322 *
323 *
324 * ** Unlinking removed interior nodes **
325 *
326 * In addition to minimizing garbage retention via self-linking
327 * described above, we also unlink removed interior nodes. These
328 * may arise due to timed out or interrupted waits, or calls to
329 * remove(x) or Iterator.remove. Normally, given a node that was
330 * at one time known to be the predecessor of some node s that is
331 * to be removed, we can unsplice s by CASing the next field of
332 * its predecessor if it still points to s (otherwise s must
333 * already have been removed or is now offlist). But there are two
334 * situations in which we cannot guarantee to make node s
335 * unreachable in this way: (1) If s is the trailing node of list
336 * (i.e., with null next), then it is pinned as the target node
337 * for appends, so can only be removed later after other nodes are
338 * appended. (2) We cannot necessarily unlink s given a
339 * predecessor node that is matched (including the case of being
340 * cancelled): the predecessor may already be unspliced, in which
341 * case some previous reachable node may still point to s.
342 * (For further explanation see Herlihy & Shavit "The Art of
343 * Multiprocessor Programming" chapter 9). However, in both
344 * cases, we can rule out the need for further action if either s
345 * or its predecessor is (or can be made to be) at, or falls off
346 * from, the head of the list.
347 *
348 * Without taking these into account, it would be possible for an
349 * unbounded number of supposedly removed nodes to remain
350 * reachable. Situations leading to such buildup are uncommon but
351 * can occur in practice; for example when a series of short timed
352 * calls to poll repeatedly time out but never otherwise fall off
353 * the list because of an untimed call to take at the front of the
354 * queue.
355 *
356 * When these cases arise, rather than always retraversing the
357 * entire list to find an actual predecessor to unlink (which
358 * won't help for case (1) anyway), we record a conservative
359 * estimate of possible unsplice failures (in "sweepVotes").
360 * We trigger a full sweep when the estimate exceeds a threshold
361 * ("SWEEP_THRESHOLD") indicating the maximum number of estimated
362 * removal failures to tolerate before sweeping through, unlinking
363 * cancelled nodes that were not unlinked upon initial removal.
364 * We perform sweeps by the thread hitting threshold (rather than
365 * background threads or by spreading work to other threads)
366 * because in the main contexts in which removal occurs, the
367 * caller is already timed-out, cancelled, or performing a
368 * potentially O(n) operation (e.g. remove(x)), none of which are
369 * time-critical enough to warrant the overhead that alternatives
370 * would impose on other threads.
371 *
372 * Because the sweepVotes estimate is conservative, and because
373 * nodes become unlinked "naturally" as they fall off the head of
374 * the queue, and because we allow votes to accumulate even while
375 * sweeps are in progress, there are typically significantly fewer
376 * such nodes than estimated. Choice of a threshold value
377 * balances the likelihood of wasted effort and contention, versus
378 * providing a worst-case bound on retention of interior nodes in
379 * quiescent queues. The value defined below was chosen
380 * empirically to balance these under various timeout scenarios.
381 *
382 * Note that we cannot self-link unlinked interior nodes during
383 * sweeps. However, the associated garbage chains terminate when
384 * some successor ultimately falls off the head of the list and is
385 * self-linked.
386 */
387
388 /** True if on multiprocessor */
389 private static final boolean MP =
390 Runtime.getRuntime().availableProcessors() > 1;
391
392 /**
393 * The number of times to spin (with randomly interspersed calls
394 * to Thread.yield) on multiprocessor before blocking when a node
395 * is apparently the first waiter in the queue. See above for
396 * explanation. Must be a power of two. The value is empirically
397 * derived -- it works pretty well across a variety of processors,
398 * numbers of CPUs, and OSes.
399 */
400 private static final int FRONT_SPINS = 1 << 7;
401
402 /**
403 * The number of times to spin before blocking when a node is
404 * preceded by another node that is apparently spinning. Also
405 * serves as an increment to FRONT_SPINS on phase changes, and as
406 * base average frequency for yielding during spins. Must be a
407 * power of two.
408 */
409 private static final int CHAINED_SPINS = FRONT_SPINS >>> 1;
410
411 /**
412 * The maximum number of estimated removal failures (sweepVotes)
413 * to tolerate before sweeping through the queue unlinking
414 * cancelled nodes that were not unlinked upon initial
415 * removal. See above for explanation. The value must be at least
416 * two to avoid useless sweeps when removing trailing nodes.
417 */
418 static final int SWEEP_THRESHOLD = 32;
419
420 /**
421 * Queue nodes. Uses Object, not E, for items to allow forgetting
422 * them after use. Relies heavily on VarHandles to minimize
423 * unnecessary ordering constraints: Writes that are intrinsically
424 * ordered wrt other accesses or CASes use simple relaxed forms.
425 */
426 static final class Node {
427 final boolean isData; // false if this is a request node
428 volatile Object item; // initially non-null if isData; CASed to match
429 volatile Node next;
430 volatile Thread waiter; // null until waiting
431
432 final boolean casNext(Node cmp, Node val) {
433 return NEXT.compareAndSet(this, cmp, val);
434 }
435
436 final boolean casItem(Object cmp, Object val) {
437 // assert isData == (cmp != null);
438 // assert isData == (val == null);
439 // assert !(cmp instanceof Node);
440 return ITEM.compareAndSet(this, cmp, val);
441 }
442
443 /**
444 * Constructs a new node. Uses relaxed write because item can
445 * only be seen after publication via casNext.
446 */
447 Node(Object item) {
448 ITEM.set(this, item);
449 isData = (item != null);
450 }
451
452 /**
453 * Links node to itself to avoid garbage retention. Called
454 * only after CASing head field; uses a release write.
455 */
456 final void forgetNext() {
457 NEXT.setRelease(this, this);
458 }
459
460 /**
461 * Sets item (of a request node) to self and waiter to null,
462 * to avoid garbage retention after matching or cancelling.
463 * Uses relaxed writes because order is already constrained in
464 * the only calling contexts: item is forgotten only after
465 * volatile/atomic mechanics that extract items, and visitors
466 * of request nodes only ever check whether item is null.
467 * Similarly, clearing waiter follows either CAS or return
468 * from park (if ever parked; else we don't care).
469 */
470 final void forgetContents() {
471 // assert isMatched();
472 if (!isData)
473 ITEM.set(this, this);
474 WAITER.set(this, null);
475 }
476
477 /**
478 * Returns true if this node has been matched, including the
479 * case of artificial matches due to cancellation.
480 */
481 final boolean isMatched() {
482 return isData == (item == null);
483 }
484
485 /**
486 * Returns true if a node with the given mode cannot be
487 * appended to this node because this node is unmatched and
488 * has opposite data mode.
489 */
490 final boolean cannotPrecede(boolean haveData) {
491 boolean d = isData;
492 return d != haveData && d != (item == null);
493 }
494
495 /**
496 * Tries to artificially match a data node -- used by remove.
497 */
498 final boolean tryMatchData() {
499 // assert isData;
500 final Object x;
501 if ((x = item) != null && casItem(x, null)) {
502 LockSupport.unpark(waiter);
503 return true;
504 }
505 return false;
506 }
507
508 private static final long serialVersionUID = -3375979862319811754L;
509
510 // VarHandle mechanics
511 private static final VarHandle ITEM;
512 private static final VarHandle NEXT;
513 private static final VarHandle WAITER;
514 static {
515 try {
516 MethodHandles.Lookup l = MethodHandles.lookup();
517 ITEM = l.findVarHandle(Node.class, "item", Object.class);
518 NEXT = l.findVarHandle(Node.class, "next", Node.class);
519 WAITER = l.findVarHandle(Node.class, "waiter", Thread.class);
520 } catch (ReflectiveOperationException e) {
521 throw new Error(e);
522 }
523 }
524 }
525
526 /** head of the queue; null until first enqueue */
527 transient volatile Node head;
528
529 /** tail of the queue; null until first append */
530 private transient volatile Node tail;
531
532 /** The number of apparent failures to unsplice removed nodes */
533 private transient volatile int sweepVotes;
534
535 private boolean casTail(Node cmp, Node val) {
536 return TAIL.compareAndSet(this, cmp, val);
537 }
538
539 private boolean casHead(Node cmp, Node val) {
540 return HEAD.compareAndSet(this, cmp, val);
541 }
542
543 private boolean casSweepVotes(int cmp, int val) {
544 return SWEEPVOTES.compareAndSet(this, cmp, val);
545 }
546
547 /*
548 * Possible values for "how" argument in xfer method.
549 */
550 private static final int NOW = 0; // for untimed poll, tryTransfer
551 private static final int ASYNC = 1; // for offer, put, add
552 private static final int SYNC = 2; // for transfer, take
553 private static final int TIMED = 3; // for timed poll, tryTransfer
554
555 /**
556 * Implements all queuing methods. See above for explanation.
557 *
558 * @param e the item or null for take
559 * @param haveData true if this is a put, else a take
560 * @param how NOW, ASYNC, SYNC, or TIMED
561 * @param nanos timeout in nanosecs, used only if mode is TIMED
562 * @return an item if matched, else e
563 * @throws NullPointerException if haveData mode but e is null
564 */
565 private E xfer(E e, boolean haveData, int how, long nanos) {
566 if (haveData && (e == null))
567 throw new NullPointerException();
568 Node s = null; // the node to append, if needed
569
570 restartFromHead: for (;;) {
571 for (Node h = head, p = h; p != null;) { // find & match first node
572 boolean isData = p.isData;
573 Object item = p.item;
574 if ((item != null) == isData) { // unmatched
575 if (isData == haveData) // can't match
576 break;
577 if (p.casItem(item, e)) { // match
578 for (Node q = p; q != h;) {
579 Node n = q.next; // update by 2 unless singleton
580 if (head == h && casHead(h, n == null ? q : n)) {
581 h.forgetNext();
582 break;
583 } // advance and retry
584 if ((h = head) == null ||
585 (q = h.next) == null || !q.isMatched())
586 break; // unless slack < 2
587 }
588 LockSupport.unpark(p.waiter);
589 @SuppressWarnings("unchecked") E itemE = (E) item;
590 return itemE;
591 }
592 }
593 Node n = p.next;
594 p = (p != n) ? n : (h = head); // Use head if p offlist
595 }
596
597 if (how != NOW) { // No matches available
598 if (s == null)
599 s = new Node(e);
600 Node pred = tryAppend(s, haveData);
601 if (pred == null)
602 continue restartFromHead; // lost race vs opposite mode
603 if (how != ASYNC)
604 return awaitMatch(s, pred, e, (how == TIMED), nanos);
605 }
606 return e; // not waiting
607 }
608 }
609
610 /**
611 * Tries to append node s as tail.
612 *
613 * @param s the node to append
614 * @param haveData true if appending in data mode
615 * @return null on failure due to losing race with append in
616 * different mode, else s's predecessor, or s itself if no
617 * predecessor
618 */
619 private Node tryAppend(Node s, boolean haveData) {
620 for (Node t = tail, p = t;;) { // move p to last node and append
621 Node n, u; // temps for reads of next & tail
622 if (p == null && (p = head) == null) {
623 if (casHead(null, s))
624 return s; // initialize
625 }
626 else if (p.cannotPrecede(haveData))
627 return null; // lost race vs opposite mode
628 else if ((n = p.next) != null) // not last; keep traversing
629 p = p != t && t != (u = tail) ? (t = u) : // stale tail
630 (p != n) ? n : null; // restart if off list
631 else if (!p.casNext(null, s))
632 p = p.next; // re-read on CAS failure
633 else {
634 if (p != t) { // update if slack now >= 2
635 while ((tail != t || !casTail(t, s)) &&
636 (t = tail) != null &&
637 (s = t.next) != null && // advance and retry
638 (s = s.next) != null && s != t);
639 }
640 return p;
641 }
642 }
643 }
644
645 /**
646 * Spins/yields/blocks until node s is matched or caller gives up.
647 *
648 * @param s the waiting node
649 * @param pred the predecessor of s, or s itself if it has no
650 * predecessor, or null if unknown (the null case does not occur
651 * in any current calls but may in possible future extensions)
652 * @param e the comparison value for checking match
653 * @param timed if true, wait only until timeout elapses
654 * @param nanos timeout in nanosecs, used only if timed is true
655 * @return matched item, or e if unmatched on interrupt or timeout
656 */
657 private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) {
658 final long deadline = timed ? System.nanoTime() + nanos : 0L;
659 Thread w = Thread.currentThread();
660 int spins = -1; // initialized after first item and cancel checks
661 ThreadLocalRandom randomYields = null; // bound if needed
662
663 for (;;) {
664 Object item = s.item;
665 if (item != e) { // matched
666 // assert item != s;
667 s.forgetContents(); // avoid garbage
668 @SuppressWarnings("unchecked") E itemE = (E) item;
669 return itemE;
670 }
671 else if (w.isInterrupted() || (timed && nanos <= 0L)) {
672 // try to cancel and unlink
673 if (s.casItem(e, s.isData ? null : s)) {
674 unsplice(pred, s);
675 return e;
676 }
677 // return normally if lost CAS
678 }
679 else if (spins < 0) { // establish spins at/near front
680 if ((spins = spinsFor(pred, s.isData)) > 0)
681 randomYields = ThreadLocalRandom.current();
682 }
683 else if (spins > 0) { // spin
684 --spins;
685 if (randomYields.nextInt(CHAINED_SPINS) == 0)
686 Thread.yield(); // occasionally yield
687 }
688 else if (s.waiter == null) {
689 s.waiter = w; // request unpark then recheck
690 }
691 else if (timed) {
692 nanos = deadline - System.nanoTime();
693 if (nanos > 0L)
694 LockSupport.parkNanos(this, nanos);
695 }
696 else {
697 LockSupport.park(this);
698 }
699 }
700 }
701
702 /**
703 * Returns spin/yield value for a node with given predecessor and
704 * data mode. See above for explanation.
705 */
706 private static int spinsFor(Node pred, boolean haveData) {
707 if (MP && pred != null) {
708 if (pred.isData != haveData) // phase change
709 return FRONT_SPINS + CHAINED_SPINS;
710 if (pred.isMatched()) // probably at front
711 return FRONT_SPINS;
712 if (pred.waiter == null) // pred apparently spinning
713 return CHAINED_SPINS;
714 }
715 return 0;
716 }
717
718 /* -------------- Traversal methods -------------- */
719
720 /**
721 * Returns the first unmatched data node, or null if none.
722 * Callers must recheck if the returned node is unmatched
723 * before using.
724 */
725 final Node firstDataNode() {
726 restartFromHead: for (;;) {
727 for (Node p = head; p != null;) {
728 Object item = p.item;
729 if (p.isData) {
730 if (item != null)
731 return p;
732 }
733 else if (item == null)
734 break;
735 if (p == (p = p.next))
736 continue restartFromHead;
737 }
738 return null;
739 }
740 }
741
742 /**
743 * Traverses and counts unmatched nodes of the given mode.
744 * Used by methods size and getWaitingConsumerCount.
745 */
746 private int countOfMode(boolean data) {
747 restartFromHead: for (;;) {
748 int count = 0;
749 for (Node p = head; p != null;) {
750 if (!p.isMatched()) {
751 if (p.isData != data)
752 return 0;
753 if (++count == Integer.MAX_VALUE)
754 break; // @see Collection.size()
755 }
756 if (p == (p = p.next))
757 continue restartFromHead;
758 }
759 return count;
760 }
761 }
762
763 public String toString() {
764 String[] a = null;
765 restartFromHead: for (;;) {
766 int charLength = 0;
767 int size = 0;
768 for (Node p = head; p != null;) {
769 Object item = p.item;
770 if (p.isData) {
771 if (item != null) {
772 if (a == null)
773 a = new String[4];
774 else if (size == a.length)
775 a = Arrays.copyOf(a, 2 * size);
776 String s = item.toString();
777 a[size++] = s;
778 charLength += s.length();
779 }
780 } else if (item == null)
781 break;
782 if (p == (p = p.next))
783 continue restartFromHead;
784 }
785
786 if (size == 0)
787 return "[]";
788
789 return Helpers.toString(a, size, charLength);
790 }
791 }
792
793 private Object[] toArrayInternal(Object[] a) {
794 Object[] x = a;
795 restartFromHead: for (;;) {
796 int size = 0;
797 for (Node p = head; p != null;) {
798 Object item = p.item;
799 if (p.isData) {
800 if (item != null) {
801 if (x == null)
802 x = new Object[4];
803 else if (size == x.length)
804 x = Arrays.copyOf(x, 2 * (size + 4));
805 x[size++] = item;
806 }
807 } else if (item == null)
808 break;
809 if (p == (p = p.next))
810 continue restartFromHead;
811 }
812 if (x == null)
813 return new Object[0];
814 else if (a != null && size <= a.length) {
815 if (a != x)
816 System.arraycopy(x, 0, a, 0, size);
817 if (size < a.length)
818 a[size] = null;
819 return a;
820 }
821 return (size == x.length) ? x : Arrays.copyOf(x, size);
822 }
823 }
824
825 /**
826 * Returns an array containing all of the elements in this queue, in
827 * proper sequence.
828 *
829 * <p>The returned array will be "safe" in that no references to it are
830 * maintained by this queue. (In other words, this method must allocate
831 * a new array). The caller is thus free to modify the returned array.
832 *
833 * <p>This method acts as bridge between array-based and collection-based
834 * APIs.
835 *
836 * @return an array containing all of the elements in this queue
837 */
838 public Object[] toArray() {
839 return toArrayInternal(null);
840 }
841
842 /**
843 * Returns an array containing all of the elements in this queue, in
844 * proper sequence; the runtime type of the returned array is that of
845 * the specified array. If the queue fits in the specified array, it
846 * is returned therein. Otherwise, a new array is allocated with the
847 * runtime type of the specified array and the size of this queue.
848 *
849 * <p>If this queue fits in the specified array with room to spare
850 * (i.e., the array has more elements than this queue), the element in
851 * the array immediately following the end of the queue is set to
852 * {@code null}.
853 *
854 * <p>Like the {@link #toArray()} method, this method acts as bridge between
855 * array-based and collection-based APIs. Further, this method allows
856 * precise control over the runtime type of the output array, and may,
857 * under certain circumstances, be used to save allocation costs.
858 *
859 * <p>Suppose {@code x} is a queue known to contain only strings.
860 * The following code can be used to dump the queue into a newly
861 * allocated array of {@code String}:
862 *
863 * <pre> {@code String[] y = x.toArray(new String[0]);}</pre>
864 *
865 * Note that {@code toArray(new Object[0])} is identical in function to
866 * {@code toArray()}.
867 *
868 * @param a the array into which the elements of the queue are to
869 * be stored, if it is big enough; otherwise, a new array of the
870 * same runtime type is allocated for this purpose
871 * @return an array containing all of the elements in this queue
872 * @throws ArrayStoreException if the runtime type of the specified array
873 * is not a supertype of the runtime type of every element in
874 * this queue
875 * @throws NullPointerException if the specified array is null
876 */
877 @SuppressWarnings("unchecked")
878 public <T> T[] toArray(T[] a) {
879 Objects.requireNonNull(a);
880 return (T[]) toArrayInternal(a);
881 }
882
883 final class Itr implements Iterator<E> {
884 private Node nextNode; // next node to return item for
885 private E nextItem; // the corresponding item
886 private Node lastRet; // last returned node, to support remove
887 private Node lastPred; // predecessor to unlink lastRet
888
889 /**
890 * Moves to next node after prev, or first node if prev null.
891 */
892 private void advance(Node prev) {
893 /*
894 * To track and avoid buildup of deleted nodes in the face
895 * of calls to both Queue.remove and Itr.remove, we must
896 * include variants of unsplice and sweep upon each
897 * advance: Upon Itr.remove, we may need to catch up links
898 * from lastPred, and upon other removes, we might need to
899 * skip ahead from stale nodes and unsplice deleted ones
900 * found while advancing.
901 */
902
903 Node r, b; // reset lastPred upon possible deletion of lastRet
904 if ((r = lastRet) != null && !r.isMatched())
905 lastPred = r; // next lastPred is old lastRet
906 else if ((b = lastPred) == null || b.isMatched())
907 lastPred = null; // at start of list
908 else {
909 Node s, n; // help with removal of lastPred.next
910 while ((s = b.next) != null &&
911 s != b && s.isMatched() &&
912 (n = s.next) != null && n != s)
913 b.casNext(s, n);
914 }
915
916 this.lastRet = prev;
917
918 for (Node p = prev, s, n;;) {
919 s = (p == null) ? head : p.next;
920 if (s == null)
921 break;
922 else if (s == p) {
923 p = null;
924 continue;
925 }
926 Object item = s.item;
927 if (s.isData) {
928 if (item != null) {
929 @SuppressWarnings("unchecked") E itemE = (E) item;
930 nextItem = itemE;
931 nextNode = s;
932 return;
933 }
934 }
935 else if (item == null)
936 break;
937 // assert s.isMatched();
938 if (p == null)
939 p = s;
940 else if ((n = s.next) == null)
941 break;
942 else if (s == n)
943 p = null;
944 else
945 p.casNext(s, n);
946 }
947 nextNode = null;
948 nextItem = null;
949 }
950
951 Itr() {
952 advance(null);
953 }
954
955 public final boolean hasNext() {
956 return nextNode != null;
957 }
958
959 public final E next() {
960 Node p = nextNode;
961 if (p == null) throw new NoSuchElementException();
962 E e = nextItem;
963 advance(p);
964 return e;
965 }
966
967 // Default implementation of forEachRemaining is "good enough".
968
969 public final void remove() {
970 final Node lastRet = this.lastRet;
971 if (lastRet == null)
972 throw new IllegalStateException();
973 this.lastRet = null;
974 if (lastRet.tryMatchData())
975 unsplice(lastPred, lastRet);
976 }
977 }
978
979 /** A customized variant of Spliterators.IteratorSpliterator */
980 final class LTQSpliterator implements Spliterator<E> {
981 static final int MAX_BATCH = 1 << 25; // max batch array size;
982 Node current; // current node; null until initialized
983 int batch; // batch size for splits
984 boolean exhausted; // true when no more nodes
985 LTQSpliterator() {}
986
987 public Spliterator<E> trySplit() {
988 Node p, q;
989 if ((p = current()) == null || (q = p.next) == null)
990 return null;
991 int i = 0, n = batch = Math.min(batch + 1, MAX_BATCH);
992 Object[] a = null;
993 do {
994 final Object item = p.item;
995 if (p.isData) {
996 if (item != null)
997 ((a != null) ? a : (a = new Object[n]))[i++] = item;
998 } else if (item == null) {
999 p = null;
1000 break;
1001 }
1002 if (p == (p = q))
1003 p = firstDataNode();
1004 } while (p != null && (q = p.next) != null && i < n);
1005 setCurrent(p);
1006 return (i == 0) ? null :
1007 Spliterators.spliterator(a, 0, i, (Spliterator.ORDERED |
1008 Spliterator.NONNULL |
1009 Spliterator.CONCURRENT));
1010 }
1011
1012 @SuppressWarnings("unchecked")
1013 public void forEachRemaining(Consumer<? super E> action) {
1014 Objects.requireNonNull(action);
1015 final Node p;
1016 if ((p = current()) != null) {
1017 current = null;
1018 exhausted = true;
1019 forEachFrom(action, p);
1020 }
1021 }
1022
1023 @SuppressWarnings("unchecked")
1024 public boolean tryAdvance(Consumer<? super E> action) {
1025 Objects.requireNonNull(action);
1026 Node p;
1027 if ((p = current()) != null) {
1028 E e = null;
1029 do {
1030 final Object item = p.item;
1031 final boolean isData = p.isData;
1032 if (p == (p = p.next))
1033 p = head;
1034 if (isData) {
1035 if (item != null) {
1036 e = (E) item;
1037 break;
1038 }
1039 }
1040 else if (item == null)
1041 p = null;
1042 } while (p != null);
1043 setCurrent(p);
1044 if (e != null) {
1045 action.accept(e);
1046 return true;
1047 }
1048 }
1049 return false;
1050 }
1051
1052 private void setCurrent(Node p) {
1053 if ((current = p) == null)
1054 exhausted = true;
1055 }
1056
1057 private Node current() {
1058 Node p;
1059 if ((p = current) == null && !exhausted)
1060 setCurrent(p = firstDataNode());
1061 return p;
1062 }
1063
1064 public long estimateSize() { return Long.MAX_VALUE; }
1065
1066 public int characteristics() {
1067 return (Spliterator.ORDERED |
1068 Spliterator.NONNULL |
1069 Spliterator.CONCURRENT);
1070 }
1071 }
1072
1073 /**
1074 * Returns a {@link Spliterator} over the elements in this queue.
1075 *
1076 * <p>The returned spliterator is
1077 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1078 *
1079 * <p>The {@code Spliterator} reports {@link Spliterator#CONCURRENT},
1080 * {@link Spliterator#ORDERED}, and {@link Spliterator#NONNULL}.
1081 *
1082 * @implNote
1083 * The {@code Spliterator} implements {@code trySplit} to permit limited
1084 * parallelism.
1085 *
1086 * @return a {@code Spliterator} over the elements in this queue
1087 * @since 1.8
1088 */
1089 public Spliterator<E> spliterator() {
1090 return new LTQSpliterator();
1091 }
1092
1093 /* -------------- Removal methods -------------- */
1094
1095 /**
1096 * Unsplices (now or later) the given deleted/cancelled node with
1097 * the given predecessor.
1098 *
1099 * @param pred a node that was at one time known to be the
1100 * predecessor of s, or null or s itself if s is/was at head
1101 * @param s the node to be unspliced
1102 */
1103 final void unsplice(Node pred, Node s) {
1104 s.waiter = null; // disable signals
1105 /*
1106 * See above for rationale. Briefly: if pred still points to
1107 * s, try to unlink s. If s cannot be unlinked (because it is the
1108 * trailing node or pred might be unlinked) and neither pred
1109 * nor s is the head or offlist, add to sweepVotes; if enough
1110 * votes have accumulated, sweep.
1111 */
1112 if (pred != null && pred != s && pred.next == s) {
1113 Node n = s.next;
1114 if (n == null ||
1115 (n != s && pred.casNext(s, n) && pred.isMatched())) {
1116 for (;;) { // check if at, or could be, head
1117 Node h = head;
1118 if (h == pred || h == s || h == null)
1119 return; // at head or list empty
1120 if (!h.isMatched())
1121 break;
1122 Node hn = h.next;
1123 if (hn == null)
1124 return; // now empty
1125 if (hn != h && casHead(h, hn))
1126 h.forgetNext(); // advance head
1127 }
1128 if (pred.next != pred && s.next != s) { // recheck if offlist
1129 for (;;) { // sweep now if enough votes
1130 int v = sweepVotes;
1131 if (v < SWEEP_THRESHOLD) {
1132 if (casSweepVotes(v, v + 1))
1133 break;
1134 }
1135 else if (casSweepVotes(v, 0)) {
1136 sweep();
1137 break;
1138 }
1139 }
1140 }
1141 }
1142 }
1143 }
1144
1145 /**
1146 * Unlinks matched (typically cancelled) nodes encountered in a
1147 * traversal from head.
1148 */
1149 private void sweep() {
1150 for (Node p = head, s, n; p != null && (s = p.next) != null; ) {
1151 if (!s.isMatched())
1152 // Unmatched nodes are never self-linked
1153 p = s;
1154 else if ((n = s.next) == null) // trailing node is pinned
1155 break;
1156 else if (s == n) // stale
1157 // No need to also check for p == s, since that implies s == n
1158 p = head;
1159 else
1160 p.casNext(s, n);
1161 }
1162 }
1163
1164 /**
1165 * Creates an initially empty {@code LinkedTransferQueue}.
1166 */
1167 public LinkedTransferQueue() {
1168 }
1169
1170 /**
1171 * Creates a {@code LinkedTransferQueue}
1172 * initially containing the elements of the given collection,
1173 * added in traversal order of the collection's iterator.
1174 *
1175 * @param c the collection of elements to initially contain
1176 * @throws NullPointerException if the specified collection or any
1177 * of its elements are null
1178 */
1179 public LinkedTransferQueue(Collection<? extends E> c) {
1180 this();
1181 addAll(c);
1182 }
1183
1184 /**
1185 * Inserts the specified element at the tail of this queue.
1186 * As the queue is unbounded, this method will never block.
1187 *
1188 * @throws NullPointerException if the specified element is null
1189 */
1190 public void put(E e) {
1191 xfer(e, true, ASYNC, 0);
1192 }
1193
1194 /**
1195 * Inserts the specified element at the tail of this queue.
1196 * As the queue is unbounded, this method will never block or
1197 * return {@code false}.
1198 *
1199 * @return {@code true} (as specified by
1200 * {@link java.util.concurrent.BlockingQueue#offer(Object,long,TimeUnit)
1201 * BlockingQueue.offer})
1202 * @throws NullPointerException if the specified element is null
1203 */
1204 public boolean offer(E e, long timeout, TimeUnit unit) {
1205 xfer(e, true, ASYNC, 0);
1206 return true;
1207 }
1208
1209 /**
1210 * Inserts the specified element at the tail of this queue.
1211 * As the queue is unbounded, this method will never return {@code false}.
1212 *
1213 * @return {@code true} (as specified by {@link Queue#offer})
1214 * @throws NullPointerException if the specified element is null
1215 */
1216 public boolean offer(E e) {
1217 xfer(e, true, ASYNC, 0);
1218 return true;
1219 }
1220
1221 /**
1222 * Inserts the specified element at the tail of this queue.
1223 * As the queue is unbounded, this method will never throw
1224 * {@link IllegalStateException} or return {@code false}.
1225 *
1226 * @return {@code true} (as specified by {@link Collection#add})
1227 * @throws NullPointerException if the specified element is null
1228 */
1229 public boolean add(E e) {
1230 xfer(e, true, ASYNC, 0);
1231 return true;
1232 }
1233
1234 /**
1235 * Transfers the element to a waiting consumer immediately, if possible.
1236 *
1237 * <p>More precisely, transfers the specified element immediately
1238 * if there exists a consumer already waiting to receive it (in
1239 * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
1240 * otherwise returning {@code false} without enqueuing the element.
1241 *
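     * <p>Illustrative sketch (the {@code queue}, {@code task}, and
     * {@code executor} names are hypothetical): unlike {@code offer},
     * this method never enqueues the element, so a caller can fall
     * back to some other action when no consumer is waiting:
     *
     * <pre> {@code
     * if (!queue.tryTransfer(task))
     *     executor.execute(task); // no waiting consumer; handle elsewhere}</pre>
     *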
1242 * @throws NullPointerException if the specified element is null
1243 */
1244 public boolean tryTransfer(E e) {
1245 return xfer(e, true, NOW, 0) == null;
1246 }
1247
1248 /**
1249 * Transfers the element to a consumer, waiting if necessary to do so.
1250 *
1251 * <p>More precisely, transfers the specified element immediately
1252 * if there exists a consumer already waiting to receive it (in
1253 * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
1254 * else inserts the specified element at the tail of this queue
1255 * and waits until the element is received by a consumer.
1256 *
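     * <p>Illustrative sketch (names hypothetical): a producer that must
     * not proceed until its element has been received can simply call
     * <pre> {@code queue.transfer(element); // returns only once a consumer has received it}</pre>
     *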
1257 * @throws NullPointerException if the specified element is null
1258 */
1259 public void transfer(E e) throws InterruptedException {
1260 if (xfer(e, true, SYNC, 0) != null) {
1261 Thread.interrupted(); // failure possible only due to interrupt
1262 throw new InterruptedException();
1263 }
1264 }
1265
1266 /**
1267 * Transfers the element to a consumer if it is possible to do so
1268 * before the timeout elapses.
1269 *
1270 * <p>More precisely, transfers the specified element immediately
1271 * if there exists a consumer already waiting to receive it (in
1272 * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
1273 * else inserts the specified element at the tail of this queue
1274 * and waits until the element is received by a consumer,
1275 * returning {@code false} if the specified wait time elapses
1276 * before the element can be transferred.
1277 *
1278 * @throws NullPointerException if the specified element is null
1279 */
1280 public boolean tryTransfer(E e, long timeout, TimeUnit unit)
1281 throws InterruptedException {
1282 if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null)
1283 return true;
1284 if (!Thread.interrupted())
1285 return false;
1286 throw new InterruptedException();
1287 }
1288
1289 public E take() throws InterruptedException {
1290 E e = xfer(null, false, SYNC, 0);
1291 if (e != null)
1292 return e;
1293 Thread.interrupted();
1294 throw new InterruptedException();
1295 }
1296
1297 public E poll(long timeout, TimeUnit unit) throws InterruptedException {
1298 E e = xfer(null, false, TIMED, unit.toNanos(timeout));
1299 if (e != null || !Thread.interrupted())
1300 return e;
1301 throw new InterruptedException();
1302 }
1303
1304 public E poll() {
1305 return xfer(null, false, NOW, 0);
1306 }
1307
1308 /**
1309 * @throws NullPointerException {@inheritDoc}
1310 * @throws IllegalArgumentException {@inheritDoc}
1311 */
1312 public int drainTo(Collection<? super E> c) {
1313 Objects.requireNonNull(c);
1314 if (c == this)
1315 throw new IllegalArgumentException();
1316 int n = 0;
1317 for (E e; (e = poll()) != null; n++)
1318 c.add(e);
1319 return n;
1320 }
1321
1322 /**
1323 * @throws NullPointerException {@inheritDoc}
1324 * @throws IllegalArgumentException {@inheritDoc}
1325 */
1326 public int drainTo(Collection<? super E> c, int maxElements) {
1327 Objects.requireNonNull(c);
1328 if (c == this)
1329 throw new IllegalArgumentException();
1330 int n = 0;
1331 for (E e; n < maxElements && (e = poll()) != null; n++)
1332 c.add(e);
1333 return n;
1334 }
1335
1336 /**
1337 * Returns an iterator over the elements in this queue in proper sequence.
1338 * The elements will be returned in order from first (head) to last (tail).
1339 *
1340 * <p>The returned iterator is
1341 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1342 *
1343 * @return an iterator over the elements in this queue in proper sequence
1344 */
1345 public Iterator<E> iterator() {
1346 return new Itr();
1347 }
1348
1349 public E peek() {
1350 restartFromHead: for (;;) {
1351 for (Node p = head; p != null;) {
1352 Object item = p.item;
1353 if (p.isData) {
1354 if (item != null) {
1355 @SuppressWarnings("unchecked") E e = (E) item;
1356 return e;
1357 }
1358 }
1359 else if (item == null)
1360 break;
1361 if (p == (p = p.next))
1362 continue restartFromHead;
1363 }
1364 return null;
1365 }
1366 }
1367
1368 /**
1369 * Returns {@code true} if this queue contains no elements.
1370 *
1371 * @return {@code true} if this queue contains no elements
1372 */
1373 public boolean isEmpty() {
1374 return firstDataNode() == null;
1375 }
1376
1377 public boolean hasWaitingConsumer() {
1378 restartFromHead: for (;;) {
1379 for (Node p = head; p != null;) {
1380 Object item = p.item;
1381 if (p.isData) {
1382 if (item != null)
1383 break;
1384 }
1385 else if (item == null)
1386 return true;
1387 if (p == (p = p.next))
1388 continue restartFromHead;
1389 }
1390 return false;
1391 }
1392 }
1393
1394 /**
1395 * Returns the number of elements in this queue. If this queue
1396 * contains more than {@code Integer.MAX_VALUE} elements, returns
1397 * {@code Integer.MAX_VALUE}.
1398 *
1399 * <p>Beware that, unlike in most collections, this method is
1400 * <em>NOT</em> a constant-time operation. Because of the
1401 * asynchronous nature of these queues, determining the current
1402 * number of elements requires an O(n) traversal.
1403 *
1404 * @return the number of elements in this queue
1405 */
1406 public int size() {
1407 return countOfMode(true);
1408 }
1409
1410 public int getWaitingConsumerCount() {
1411 return countOfMode(false);
1412 }
1413
1414 /**
1415 * Removes a single instance of the specified element from this queue,
1416 * if it is present. More formally, removes an element {@code e} such
1417 * that {@code o.equals(e)}, if this queue contains one or more such
1418 * elements.
1419 * Returns {@code true} if this queue contained the specified element
1420 * (or equivalently, if this queue changed as a result of the call).
1421 *
1422 * @param o element to be removed from this queue, if present
1423 * @return {@code true} if this queue changed as a result of the call
1424 */
1425 public boolean remove(Object o) {
1426 if (o == null)
1427 return false;
1428 restartFromHead: for (;;) {
1429 for (Node pred = null, p = head; p != null; ) {
1430 Object item = p.item;
1431 if (p.isData) {
1432 if (item != null
1433 && o.equals(item)
1434 && p.tryMatchData()) {
1435 unsplice(pred, p);
1436 return true;
1437 }
1438 }
1439 else if (item == null)
1440 break;
1441 if ((pred = p) == (p = p.next))
1442 continue restartFromHead;
1443 }
1444 return false;
1445 }
1446 }
1447
1448 /**
1449 * Returns {@code true} if this queue contains the specified element.
1450 * More formally, returns {@code true} if and only if this queue contains
1451 * at least one element {@code e} such that {@code o.equals(e)}.
1452 *
1453 * @param o object to be checked for containment in this queue
1454 * @return {@code true} if this queue contains the specified element
1455 */
1456 public boolean contains(Object o) {
1457 if (o != null) {
1458 for (Node p = head; p != null; ) {
1459 Object item = p.item;
1460 if (p.isData) {
1461 if (item != null && o.equals(item))
1462 return true;
1463 }
1464 else if (item == null)
1465 break;
1466 if (p == (p = p.next))
1467 p = head;
1468 }
1469 }
1470 return false;
1471 }
1472
1473 /**
1474 * Always returns {@code Integer.MAX_VALUE} because a
1475 * {@code LinkedTransferQueue} is not capacity constrained.
1476 *
1477 * @return {@code Integer.MAX_VALUE} (as specified by
1478 * {@link java.util.concurrent.BlockingQueue#remainingCapacity()
1479 * BlockingQueue.remainingCapacity})
1480 */
1481 public int remainingCapacity() {
1482 return Integer.MAX_VALUE;
1483 }
1484
1485 /**
1486 * Saves this queue to a stream (that is, serializes it).
1487 *
1488 * @param s the stream
1489 * @throws java.io.IOException if an I/O error occurs
1490 * @serialData All of the elements (each an {@code E}) in
1491 * the proper order, followed by a null
1492 */
1493 private void writeObject(java.io.ObjectOutputStream s)
1494 throws java.io.IOException {
1495 s.defaultWriteObject();
1496 for (E e : this)
1497 s.writeObject(e);
1498 // Use trailing null as sentinel
1499 s.writeObject(null);
1500 }
1501
1502 /**
1503 * Reconstitutes this queue from a stream (that is, deserializes it).
1504 * @param s the stream
1505 * @throws ClassNotFoundException if the class of a serialized object
1506 * could not be found
1507 * @throws java.io.IOException if an I/O error occurs
1508 */
1509 private void readObject(java.io.ObjectInputStream s)
1510 throws java.io.IOException, ClassNotFoundException {
1511 s.defaultReadObject();
1512 for (;;) {
1513 @SuppressWarnings("unchecked")
1514 E item = (E) s.readObject();
1515 if (item == null)
1516 break;
1517 else
1518 offer(item);
1519 }
1520 }
1521
1522 /**
1523 * @throws NullPointerException {@inheritDoc}
1524 */
1525 public boolean removeIf(Predicate<? super E> filter) {
1526 Objects.requireNonNull(filter);
1527 return bulkRemove(filter);
1528 }
1529
1530 /**
1531 * @throws NullPointerException {@inheritDoc}
1532 */
1533 public boolean removeAll(Collection<?> c) {
1534 Objects.requireNonNull(c);
1535 return bulkRemove(e -> c.contains(e));
1536 }
1537
1538 /**
1539 * @throws NullPointerException {@inheritDoc}
1540 */
1541 public boolean retainAll(Collection<?> c) {
1542 Objects.requireNonNull(c);
1543 return bulkRemove(e -> !c.contains(e));
1544 }
1545
1546 /** Implementation of bulk remove methods. */
1547 @SuppressWarnings("unchecked")
1548 private boolean bulkRemove(Predicate<? super E> filter) {
1549 boolean removed = false;
1550 restartFromHead: for (;;) {
1551 for (Node pred = null, p = head; p != null; ) {
1552 final Object item = p.item;
1553 if (p.isData) {
1554 if (item != null
1555 && filter.test((E)item)
1556 && p.tryMatchData()) {
1557 removed = true;
1558 unsplice(pred, p);
1559 p = p.next;
1560 continue;
1561 }
1562 }
1563 else if (item == null)
1564 break;
1565 if ((pred = p) == (p = p.next))
1566 continue restartFromHead;
1567 }
1568 return removed;
1569 }
1570 }
1571
1572 /**
1573 * Runs action on each element found during a traversal starting at p.
1574 * If p is null, the action is not run.
1575 */
1576 @SuppressWarnings("unchecked")
1577 void forEachFrom(Consumer<? super E> action, Node p) {
1578 while (p != null) {
1579 final Object item = p.item;
1580 if (p.isData) {
1581 if (item != null)
1582 action.accept((E) item);
1583 }
1584 else if (item == null)
1585 break;
1586 if (p == (p = p.next))
1587 p = head;
1588 }
1589 }
1590
1591 /**
1592 * @throws NullPointerException {@inheritDoc}
1593 */
1594 public void forEach(Consumer<? super E> action) {
1595 Objects.requireNonNull(action);
1596 forEachFrom(action, head);
1597 }
1598
1599 // VarHandle mechanics
1600 private static final VarHandle HEAD;
1601 private static final VarHandle TAIL;
1602 private static final VarHandle SWEEPVOTES;
1603 static {
1604 try {
1605 MethodHandles.Lookup l = MethodHandles.lookup();
1606 HEAD = l.findVarHandle(LinkedTransferQueue.class, "head",
1607 Node.class);
1608 TAIL = l.findVarHandle(LinkedTransferQueue.class, "tail",
1609 Node.class);
1610 SWEEPVOTES = l.findVarHandle(LinkedTransferQueue.class, "sweepVotes",
1611 int.class);
1612 } catch (ReflectiveOperationException e) {
1613 throw new Error(e);
1614 }
1615
1616 // Reduce the risk of rare disastrous classloading in first call to
1617 // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
1618 Class<?> ensureLoaded = LockSupport.class;
1619 }
1620 }