ViewVC Help
View File | Revision Log | Show Annotations | Download File | Root Listing
root/jsr166/jsr166/src/main/java/util/concurrent/SynchronousQueue.java
Revision: 1.124
Committed: Wed Apr 19 23:45:51 2017 UTC (7 years, 1 month ago) by jsr166
Branch: MAIN
Changes since 1.123: +1 -1 lines
Log Message:
Redo @link and @linkplain; one @link was pointing to the wrong poll method

File Contents

# Content
1 /*
2 * Written by Doug Lea, Bill Scherer, and Michael Scott with
3 * assistance from members of JCP JSR-166 Expert Group and released to
4 * the public domain, as explained at
5 * http://creativecommons.org/publicdomain/zero/1.0/
6 */
7
8 package java.util.concurrent;
9
10 import java.lang.invoke.MethodHandles;
11 import java.lang.invoke.VarHandle;
12 import java.util.AbstractQueue;
13 import java.util.Collection;
14 import java.util.Collections;
15 import java.util.Iterator;
16 import java.util.Objects;
17 import java.util.Spliterator;
18 import java.util.Spliterators;
19 import java.util.concurrent.locks.LockSupport;
20 import java.util.concurrent.locks.ReentrantLock;
21
22 /**
23 * A {@linkplain BlockingQueue blocking queue} in which each insert
24 * operation must wait for a corresponding remove operation by another
25 * thread, and vice versa. A synchronous queue does not have any
26 * internal capacity, not even a capacity of one. You cannot
27 * {@code peek} at a synchronous queue because an element is only
28 * present when you try to remove it; you cannot insert an element
29 * (using any method) unless another thread is trying to remove it;
30 * you cannot iterate as there is nothing to iterate. The
31 * <em>head</em> of the queue is the element that the first queued
32 * inserting thread is trying to add to the queue; if there is no such
33 * queued thread then no element is available for removal and
34 * {@code poll()} will return {@code null}. For purposes of other
35 * {@code Collection} methods (for example {@code contains}), a
36 * {@code SynchronousQueue} acts as an empty collection. This queue
37 * does not permit {@code null} elements.
38 *
39 * <p>Synchronous queues are similar to rendezvous channels used in
40 * CSP and Ada. They are well suited for handoff designs, in which an
41 * object running in one thread must sync up with an object running
42 * in another thread in order to hand it some information, event, or
43 * task.
44 *
45 * <p>This class supports an optional fairness policy for ordering
46 * waiting producer and consumer threads. By default, this ordering
47 * is not guaranteed. However, a queue constructed with fairness set
48 * to {@code true} grants threads access in FIFO order.
49 *
50 * <p>This class and its iterator implement all of the <em>optional</em>
51 * methods of the {@link Collection} and {@link Iterator} interfaces.
52 *
53 * <p>This class is a member of the
54 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
55 * Java Collections Framework</a>.
56 *
57 * @since 1.5
58 * @author Doug Lea and Bill Scherer and Michael Scott
59 * @param <E> the type of elements held in this queue
60 */
61 public class SynchronousQueue<E> extends AbstractQueue<E>
62 implements BlockingQueue<E>, java.io.Serializable {
63 private static final long serialVersionUID = -3223113410248163686L;
64
65 /*
66 * This class implements extensions of the dual stack and dual
67 * queue algorithms described in "Nonblocking Concurrent Objects
68 * with Condition Synchronization", by W. N. Scherer III and
69 * M. L. Scott. 18th Annual Conf. on Distributed Computing,
70 * Oct. 2004 (see also
71 * http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/duals.html).
72 * The (Lifo) stack is used for non-fair mode, and the (Fifo)
73 * queue for fair mode. The performance of the two is generally
74 * similar. Fifo usually supports higher throughput under
75 * contention but Lifo maintains higher thread locality in common
76 * applications.
77 *
78 * A dual queue (and similarly stack) is one that at any given
79 * time either holds "data" -- items provided by put operations,
80 * or "requests" -- slots representing take operations, or is
81 * empty. A call to "fulfill" (i.e., a call requesting an item
82 * from a queue holding data or vice versa) dequeues a
83 * complementary node. The most interesting feature of these
84 * queues is that any operation can figure out which mode the
85 * queue is in, and act accordingly without needing locks.
86 *
87 * Both the queue and stack extend abstract class Transferer
88 * defining the single method transfer that does a put or a
89 * take. These are unified into a single method because in dual
90 * data structures, the put and take operations are symmetrical,
91 * so nearly all code can be combined. The resulting transfer
92 * methods are on the long side, but are easier to follow than
93 * they would be if broken up into nearly-duplicated parts.
94 *
95 * The queue and stack data structures share many conceptual
96 * similarities but very few concrete details. For simplicity,
97 * they are kept distinct so that they can later evolve
98 * separately.
99 *
100 * The algorithms here differ from the versions in the above paper
101 * in extending them for use in synchronous queues, as well as
102 * dealing with cancellation. The main differences include:
103 *
104 * 1. The original algorithms used bit-marked pointers, but
105 * the ones here use mode bits in nodes, leading to a number
106 * of further adaptations.
107 * 2. SynchronousQueues must block threads waiting to become
108 * fulfilled.
109 * 3. Support for cancellation via timeout and interrupts,
110 * including cleaning out cancelled nodes/threads
111 * from lists to avoid garbage retention and memory depletion.
112 *
113 * Blocking is mainly accomplished using LockSupport park/unpark,
114 * except that nodes that appear to be the next ones to become
115 * fulfilled first spin a bit (on multiprocessors only). On very
116 * busy synchronous queues, spinning can dramatically improve
117 * throughput. And on less busy ones, the amount of spinning is
118 * small enough not to be noticeable.
119 *
120 * Cleaning is done in different ways in queues vs stacks. For
121 * queues, we can almost always remove a node immediately in O(1)
122 * time (modulo retries for consistency checks) when it is
123 * cancelled. But if it may be pinned as the current tail, it must
124 * wait until some subsequent cancellation. For stacks, we need a
125 * potentially O(n) traversal to be sure that we can remove the
126 * node, but this can run concurrently with other threads
127 * accessing the stack.
128 *
129 * While garbage collection takes care of most node reclamation
130 * issues that otherwise complicate nonblocking algorithms, care
131 * is taken to "forget" references to data, other nodes, and
132 * threads that might be held on to long-term by blocked
133 * threads. In cases where setting to null would otherwise
134 * conflict with main algorithms, this is done by changing a
135 * node's link to now point to the node itself. This doesn't arise
136 * much for Stack nodes (because blocked threads do not hang on to
137 * old head pointers), but references in Queue nodes must be
138 * aggressively forgotten to avoid reachability of everything any
139 * node has ever referred to since arrival.
140 */
141
    /**
     * Shared internal API for dual stacks and queues.
     * Implemented by TransferStack (nonfair/LIFO mode) and
     * TransferQueue (fair/FIFO mode); see SynchronousQueue(boolean).
     */
    abstract static class Transferer<E> {
        /**
         * Performs a put or take.
         *
         * @param e if non-null, the item to be handed to a consumer;
         *          if null, requests that transfer return an item
         *          offered by producer.
         * @param timed if this operation should timeout
         * @param nanos the timeout, in nanoseconds
         * @return if non-null, the item provided or received; if null,
         *         the operation failed due to timeout or interrupt --
         *         the caller can distinguish which of these occurred
         *         by checking Thread.interrupted.
         */
        abstract E transfer(E e, boolean timed, long nanos);
    }
161
    /**
     * The number of times to spin before blocking in timed waits.
     * The value is empirically derived -- it works well across a
     * variety of processors and OSes. Empirically, the best value
     * seems not to vary with number of CPUs (beyond 2) so is just
     * a constant.
     */
    static final int MAX_TIMED_SPINS =
        // spinning cannot help on a uniprocessor, so don't spin there
        (Runtime.getRuntime().availableProcessors() < 2) ? 0 : 32;

    /**
     * The number of times to spin before blocking in untimed waits.
     * This is greater than timed value because untimed waits spin
     * faster since they don't need to check times on each spin.
     */
    static final int MAX_UNTIMED_SPINS = MAX_TIMED_SPINS * 16;

    /**
     * The number of nanoseconds for which it is faster to spin
     * rather than to use timed park. A rough estimate suffices.
     */
    static final long SPIN_FOR_TIMEOUT_THRESHOLD = 1000L;
184
    /** Dual stack */
    static final class TransferStack<E> extends Transferer<E> {
        /*
         * This extends Scherer-Scott dual stack algorithm, differing,
         * among other ways, by using "covering" nodes rather than
         * bit-marked pointers: Fulfilling operations push on marker
         * nodes (with FULFILLING bit set in mode) to reserve a spot
         * to match a waiting node.
         */

        /* Modes for SNodes, ORed together in node fields */
        /** Node represents an unfulfilled consumer */
        static final int REQUEST = 0;
        /** Node represents an unfulfilled producer */
        static final int DATA = 1;
        /** Node is fulfilling another unfulfilled DATA or REQUEST */
        static final int FULFILLING = 2;

        /** Returns true if m has fulfilling bit set. */
        static boolean isFulfilling(int m) { return (m & FULFILLING) != 0; }

        /** Node class for TransferStacks. */
        static final class SNode {
            volatile SNode next;    // next node in stack
            volatile SNode match;   // the node matched to this
            volatile Thread waiter; // to control park/unpark
            Object item;            // data; or null for REQUESTs
            int mode;
            // Note: item and mode fields don't need to be volatile
            // since they are always written before, and read after,
            // other volatile/atomic operations.

            SNode(Object item) {
                this.item = item;
            }

            boolean casNext(SNode cmp, SNode val) {
                // the cmp == next read screens out doomed CASes cheaply
                return cmp == next &&
                    SNEXT.compareAndSet(this, cmp, val);
            }

            /**
             * Tries to match node s to this node, if so, waking up thread.
             * Fulfillers call tryMatch to identify their waiters.
             * Waiters block until they have been matched.
             *
             * @param s the node to match
             * @return true if successfully matched to s
             */
            boolean tryMatch(SNode s) {
                if (match == null &&
                    SMATCH.compareAndSet(this, null, s)) {
                    Thread w = waiter;
                    if (w != null) {    // waiters need at most one unpark
                        waiter = null;
                        LockSupport.unpark(w);
                    }
                    return true;
                }
                // already matched; report whether it was by s
                return match == s;
            }

            /**
             * Tries to cancel a wait by matching node to itself.
             */
            void tryCancel() {
                SMATCH.compareAndSet(this, null, this);
            }

            boolean isCancelled() {
                // a node matched to itself was cancelled (see tryCancel)
                return match == this;
            }

            // VarHandle mechanics
            private static final VarHandle SMATCH;
            private static final VarHandle SNEXT;
            static {
                try {
                    MethodHandles.Lookup l = MethodHandles.lookup();
                    SMATCH = l.findVarHandle(SNode.class, "match", SNode.class);
                    SNEXT = l.findVarHandle(SNode.class, "next", SNode.class);
                } catch (ReflectiveOperationException e) {
                    throw new Error(e);
                }
            }
        }

        /** The head (top) of the stack */
        volatile SNode head;

        boolean casHead(SNode h, SNode nh) {
            // the h == head read screens out doomed CASes cheaply
            return h == head &&
                SHEAD.compareAndSet(this, h, nh);
        }

        /**
         * Creates or resets fields of a node. Called only from transfer
         * where the node to push on stack is lazily created and
         * reused when possible to help reduce intervals between reads
         * and CASes of head and to avoid surges of garbage when CASes
         * to push nodes fail due to contention.
         */
        static SNode snode(SNode s, Object e, SNode next, int mode) {
            if (s == null) s = new SNode(e);
            s.mode = mode;
            s.next = next;
            return s;
        }

        /**
         * Puts or takes an item.
         */
        @SuppressWarnings("unchecked")
        E transfer(E e, boolean timed, long nanos) {
            /*
             * Basic algorithm is to loop trying one of three actions:
             *
             * 1. If apparently empty or already containing nodes of same
             *    mode, try to push node on stack and wait for a match,
             *    returning it, or null if cancelled.
             *
             * 2. If apparently containing node of complementary mode,
             *    try to push a fulfilling node on to stack, match
             *    with corresponding waiting node, pop both from
             *    stack, and return matched item. The matching or
             *    unlinking might not actually be necessary because of
             *    other threads performing action 3:
             *
             * 3. If top of stack already holds another fulfilling node,
             *    help it out by doing its match and/or pop
             *    operations, and then continue. The code for helping
             *    is essentially the same as for fulfilling, except
             *    that it doesn't return the item.
             */

            SNode s = null; // constructed/reused as needed
            int mode = (e == null) ? REQUEST : DATA;

            for (;;) {
                SNode h = head;
                if (h == null || h.mode == mode) {  // empty or same-mode
                    if (timed && nanos <= 0L) {     // can't wait
                        if (h != null && h.isCancelled())
                            casHead(h, h.next);     // pop cancelled node
                        else
                            return null;
                    } else if (casHead(h, s = snode(s, e, h, mode))) {
                        SNode m = awaitFulfill(s, timed, nanos);
                        if (m == s) {               // wait was cancelled
                            clean(s);
                            return null;
                        }
                        if ((h = head) != null && h.next == s)
                            casHead(h, s.next);     // help s's fulfiller
                        // REQUEST mode returns the fulfiller's item;
                        // DATA mode returns our own item e
                        return (E) ((mode == REQUEST) ? m.item : s.item);
                    }
                } else if (!isFulfilling(h.mode)) { // try to fulfill
                    if (h.isCancelled())            // already cancelled
                        casHead(h, h.next);         // pop and retry
                    else if (casHead(h, s=snode(s, e, h, FULFILLING|mode))) {
                        for (;;) { // loop until matched or waiters disappear
                            SNode m = s.next;       // m is s's match
                            if (m == null) {        // all waiters are gone
                                casHead(s, null);   // pop fulfill node
                                s = null;           // use new node next time
                                break;              // restart main loop
                            }
                            SNode mn = m.next;
                            if (m.tryMatch(s)) {
                                casHead(s, mn);     // pop both s and m
                                return (E) ((mode == REQUEST) ? m.item : s.item);
                            } else                  // lost match
                                s.casNext(m, mn);   // help unlink
                        }
                    }
                } else {                            // help a fulfiller
                    SNode m = h.next;               // m is h's match
                    if (m == null)                  // waiter is gone
                        casHead(h, null);           // pop fulfilling node
                    else {
                        SNode mn = m.next;
                        if (m.tryMatch(h))          // help match
                            casHead(h, mn);         // pop both h and m
                        else                        // lost match
                            h.casNext(m, mn);       // help unlink
                    }
                }
            }
        }

        /**
         * Spins/blocks until node s is matched by a fulfill operation.
         *
         * @param s the waiting node
         * @param timed true if timed wait
         * @param nanos timeout value
         * @return matched node, or s if cancelled
         */
        SNode awaitFulfill(SNode s, boolean timed, long nanos) {
            /*
             * When a node/thread is about to block, it sets its waiter
             * field and then rechecks state at least one more time
             * before actually parking, thus covering race vs
             * fulfiller noticing that waiter is non-null so should be
             * woken.
             *
             * When invoked by nodes that appear at the point of call
             * to be at the head of the stack, calls to park are
             * preceded by spins to avoid blocking when producers and
             * consumers are arriving very close in time. This can
             * happen enough to bother only on multiprocessors.
             *
             * The order of checks for returning out of main loop
             * reflects fact that interrupts have precedence over
             * normal returns, which have precedence over
             * timeouts. (So, on timeout, one last check for match is
             * done before giving up.) Except that calls from untimed
             * SynchronousQueue.{poll/offer} don't check interrupts
             * and don't wait at all, so are trapped in transfer
             * method rather than calling awaitFulfill.
             */
            final long deadline = timed ? System.nanoTime() + nanos : 0L;
            Thread w = Thread.currentThread();
            int spins = shouldSpin(s)
                ? (timed ? MAX_TIMED_SPINS : MAX_UNTIMED_SPINS)
                : 0;
            for (;;) {
                if (w.isInterrupted())
                    s.tryCancel();
                SNode m = s.match;
                if (m != null)
                    return m;
                if (timed) {
                    nanos = deadline - System.nanoTime();
                    if (nanos <= 0L) {
                        s.tryCancel();
                        continue;   // recheck match once before returning
                    }
                }
                if (spins > 0) {
                    Thread.onSpinWait();
                    spins = shouldSpin(s) ? (spins - 1) : 0;
                }
                else if (s.waiter == null)
                    s.waiter = w; // establish waiter so can park next iter
                else if (!timed)
                    LockSupport.park(this);
                else if (nanos > SPIN_FOR_TIMEOUT_THRESHOLD)
                    LockSupport.parkNanos(this, nanos);
            }
        }

        /**
         * Returns true if node s is at head or there is an active
         * fulfiller.
         */
        boolean shouldSpin(SNode s) {
            SNode h = head;
            return (h == s || h == null || isFulfilling(h.mode));
        }

        /**
         * Unlinks s from the stack.
         */
        void clean(SNode s) {
            s.item = null;   // forget item
            s.waiter = null; // forget thread

            /*
             * At worst we may need to traverse entire stack to unlink
             * s. If there are multiple concurrent calls to clean, we
             * might not see s if another thread has already removed
             * it. But we can stop when we see any node known to
             * follow s. We use s.next unless it too is cancelled, in
             * which case we try the node one past. We don't check any
             * further because we don't want to doubly traverse just to
             * find sentinel.
             */

            SNode past = s.next;
            if (past != null && past.isCancelled())
                past = past.next;

            // Absorb cancelled nodes at head
            SNode p;
            while ((p = head) != null && p != past && p.isCancelled())
                casHead(p, p.next);

            // Unsplice embedded nodes
            while (p != null && p != past) {
                SNode n = p.next;
                if (n != null && n.isCancelled())
                    p.casNext(n, n.next);
                else
                    p = n;
            }
        }

        // VarHandle mechanics
        private static final VarHandle SHEAD;
        static {
            try {
                MethodHandles.Lookup l = MethodHandles.lookup();
                SHEAD = l.findVarHandle(TransferStack.class, "head", SNode.class);
            } catch (ReflectiveOperationException e) {
                throw new Error(e);
            }
        }
    }
494
    /** Dual Queue */
    static final class TransferQueue<E> extends Transferer<E> {
        /*
         * This extends Scherer-Scott dual queue algorithm, differing,
         * among other ways, by using modes within nodes rather than
         * marked pointers. The algorithm is a little simpler than
         * that for stacks because fulfillers do not need explicit
         * nodes, and matching is done by CAS'ing QNode.item field
         * from non-null to null (for put) or vice versa (for take).
         */

        /** Node class for TransferQueue. */
        static final class QNode {
            volatile QNode next;    // next node in queue
            volatile Object item;   // CAS'ed to or from null
            volatile Thread waiter; // to control park/unpark
            final boolean isData;

            QNode(Object item, boolean isData) {
                this.item = item;
                this.isData = isData;
            }

            boolean casNext(QNode cmp, QNode val) {
                // the next == cmp read screens out doomed CASes cheaply
                return next == cmp &&
                    QNEXT.compareAndSet(this, cmp, val);
            }

            boolean casItem(Object cmp, Object val) {
                return item == cmp &&
                    QITEM.compareAndSet(this, cmp, val);
            }

            /**
             * Tries to cancel by CAS'ing ref to this as item.
             */
            void tryCancel(Object cmp) {
                QITEM.compareAndSet(this, cmp, this);
            }

            boolean isCancelled() {
                // a node whose item points to itself was cancelled
                return item == this;
            }

            /**
             * Returns true if this node is known to be off the queue
             * because its next pointer has been forgotten due to
             * an advanceHead operation.
             */
            boolean isOffList() {
                return next == this;
            }

            // VarHandle mechanics
            private static final VarHandle QITEM;
            private static final VarHandle QNEXT;
            static {
                try {
                    MethodHandles.Lookup l = MethodHandles.lookup();
                    QITEM = l.findVarHandle(QNode.class, "item", Object.class);
                    QNEXT = l.findVarHandle(QNode.class, "next", QNode.class);
                } catch (ReflectiveOperationException e) {
                    throw new Error(e);
                }
            }
        }

        /** Head of queue */
        transient volatile QNode head;
        /** Tail of queue */
        transient volatile QNode tail;
        /**
         * Reference to a cancelled node that might not yet have been
         * unlinked from queue because it was the last inserted node
         * when it was cancelled.
         */
        transient volatile QNode cleanMe;

        TransferQueue() {
            QNode h = new QNode(null, false); // initialize to dummy node.
            head = h;
            tail = h;
        }

        /**
         * Tries to cas nh as new head; if successful, unlink
         * old head's next node to avoid garbage retention.
         */
        void advanceHead(QNode h, QNode nh) {
            if (h == head &&
                QHEAD.compareAndSet(this, h, nh))
                h.next = h; // forget old next (self-link marks node off-list)
        }

        /**
         * Tries to cas nt as new tail.
         */
        void advanceTail(QNode t, QNode nt) {
            if (tail == t)
                QTAIL.compareAndSet(this, t, nt);
        }

        /**
         * Tries to CAS cleanMe slot.
         */
        boolean casCleanMe(QNode cmp, QNode val) {
            return cleanMe == cmp &&
                QCLEANME.compareAndSet(this, cmp, val);
        }

        /**
         * Puts or takes an item.
         */
        @SuppressWarnings("unchecked")
        E transfer(E e, boolean timed, long nanos) {
            /* Basic algorithm is to loop trying to take either of
             * two actions:
             *
             * 1. If queue apparently empty or holding same-mode nodes,
             *    try to add node to queue of waiters, wait to be
             *    fulfilled (or cancelled) and return matching item.
             *
             * 2. If queue apparently contains waiting items, and this
             *    call is of complementary mode, try to fulfill by CAS'ing
             *    item field of waiting node and dequeuing it, and then
             *    returning matching item.
             *
             * In each case, along the way, check for and try to help
             * advance head and tail on behalf of other stalled/slow
             * threads.
             *
             * The loop starts off with a null check guarding against
             * seeing uninitialized head or tail values. This never
             * happens in current SynchronousQueue, but could if
             * callers held non-volatile/final ref to the
             * transferer. The check is here anyway because it places
             * null checks at top of loop, which is usually faster
             * than having them implicitly interspersed.
             */

            QNode s = null; // constructed/reused as needed
            boolean isData = (e != null);

            for (;;) {
                QNode t = tail;
                QNode h = head;
                if (t == null || h == null)         // saw uninitialized value
                    continue;                       // spin

                if (h == t || t.isData == isData) { // empty or same-mode
                    QNode tn = t.next;
                    if (t != tail)                  // inconsistent read
                        continue;
                    if (tn != null) {               // lagging tail
                        advanceTail(t, tn);
                        continue;
                    }
                    if (timed && nanos <= 0L)       // can't wait
                        return null;
                    if (s == null)
                        s = new QNode(e, isData);
                    if (!t.casNext(null, s))        // failed to link in
                        continue;

                    advanceTail(t, s);              // swing tail and wait
                    Object x = awaitFulfill(s, e, timed, nanos);
                    if (x == s) {                   // wait was cancelled
                        clean(t, s);
                        return null;
                    }

                    if (!s.isOffList()) {           // not already unlinked
                        advanceHead(t, s);          // unlink if head
                        if (x != null)              // and forget fields
                            s.item = s;
                        s.waiter = null;
                    }
                    // put returns e; take returns the received item x
                    return (x != null) ? (E)x : e;

                } else {                            // complementary-mode
                    QNode m = h.next;               // node to fulfill
                    if (t != tail || m == null || h != head)
                        continue;                   // inconsistent read

                    Object x = m.item;
                    if (isData == (x != null) ||    // m already fulfilled
                        x == m ||                   // m cancelled
                        !m.casItem(x, e)) {         // lost CAS
                        advanceHead(h, m);          // dequeue and retry
                        continue;
                    }

                    advanceHead(h, m);              // successfully fulfilled
                    LockSupport.unpark(m.waiter);
                    return (x != null) ? (E)x : e;
                }
            }
        }

        /**
         * Spins/blocks until node s is fulfilled.
         *
         * @param s the waiting node
         * @param e the comparison value for checking match
         * @param timed true if timed wait
         * @param nanos timeout value
         * @return matched item, or s if cancelled
         */
        Object awaitFulfill(QNode s, E e, boolean timed, long nanos) {
            /* Same idea as TransferStack.awaitFulfill */
            final long deadline = timed ? System.nanoTime() + nanos : 0L;
            Thread w = Thread.currentThread();
            int spins = (head.next == s)
                ? (timed ? MAX_TIMED_SPINS : MAX_UNTIMED_SPINS)
                : 0;
            for (;;) {
                if (w.isInterrupted())
                    s.tryCancel(e);
                Object x = s.item;
                if (x != e)     // item changed: fulfilled, or cancelled (x == s)
                    return x;
                if (timed) {
                    nanos = deadline - System.nanoTime();
                    if (nanos <= 0L) {
                        s.tryCancel(e);
                        continue;   // recheck item once before returning
                    }
                }
                if (spins > 0) {
                    --spins;
                    Thread.onSpinWait();
                }
                else if (s.waiter == null)
                    s.waiter = w;
                else if (!timed)
                    LockSupport.park(this);
                else if (nanos > SPIN_FOR_TIMEOUT_THRESHOLD)
                    LockSupport.parkNanos(this, nanos);
            }
        }

        /**
         * Gets rid of cancelled node s with original predecessor pred.
         */
        void clean(QNode pred, QNode s) {
            s.waiter = null; // forget thread
            /*
             * At any given time, exactly one node on list cannot be
             * deleted -- the last inserted node. To accommodate this,
             * if we cannot delete s, we save its predecessor as
             * "cleanMe", deleting the previously saved version
             * first. At least one of node s or the node previously
             * saved can always be deleted, so this always terminates.
             */
            while (pred.next == s) { // Return early if already unlinked
                QNode h = head;
                QNode hn = h.next;   // Absorb cancelled first node as head
                if (hn != null && hn.isCancelled()) {
                    advanceHead(h, hn);
                    continue;
                }
                QNode t = tail;      // Ensure consistent read for tail
                if (t == h)
                    return;
                QNode tn = t.next;
                if (t != tail)
                    continue;
                if (tn != null) {
                    advanceTail(t, tn);
                    continue;
                }
                if (s != t) {        // If not tail, try to unsplice
                    QNode sn = s.next;
                    if (sn == s || pred.casNext(s, sn))
                        return;
                }
                QNode dp = cleanMe;
                if (dp != null) {    // Try unlinking previous cancelled node
                    QNode d = dp.next;
                    QNode dn;
                    if (d == null ||               // d is gone or
                        d == dp ||                 // d is off list or
                        !d.isCancelled() ||        // d not cancelled or
                        (d != t &&                 // d not tail and
                         (dn = d.next) != null &&  //   has successor
                         dn != d &&                //   that is on list
                         dp.casNext(d, dn)))       // d unspliced
                        casCleanMe(dp, null);
                    if (dp == pred)
                        return;      // s is already saved node
                } else if (casCleanMe(null, pred))
                    return;          // Postpone cleaning s
            }
        }

        // VarHandle mechanics
        private static final VarHandle QHEAD;
        private static final VarHandle QTAIL;
        private static final VarHandle QCLEANME;
        static {
            try {
                MethodHandles.Lookup l = MethodHandles.lookup();
                QHEAD = l.findVarHandle(TransferQueue.class, "head",
                                        QNode.class);
                QTAIL = l.findVarHandle(TransferQueue.class, "tail",
                                        QNode.class);
                QCLEANME = l.findVarHandle(TransferQueue.class, "cleanMe",
                                           QNode.class);
            } catch (ReflectiveOperationException e) {
                throw new Error(e);
            }
        }
    }
808
    /**
     * The transferer. Set only in constructor, but cannot be declared
     * as final without further complicating serialization. Since
     * this is accessed only at most once per public method, there
     * isn't a noticeable performance penalty for using volatile
     * instead of final here.
     */
    private transient volatile Transferer<E> transferer; // assigned in SynchronousQueue(boolean)
817
    /**
     * Creates a {@code SynchronousQueue} with nonfair access policy.
     */
    public SynchronousQueue() {
        this(false); // nonfair: uses the LIFO dual stack transferer
    }
824
825 /**
826 * Creates a {@code SynchronousQueue} with the specified fairness policy.
827 *
828 * @param fair if true, waiting threads contend in FIFO order for
829 * access; otherwise the order is unspecified.
830 */
831 public SynchronousQueue(boolean fair) {
832 transferer = fair ? new TransferQueue<E>() : new TransferStack<E>();
833 }
834
835 /**
836 * Adds the specified element to this queue, waiting if necessary for
837 * another thread to receive it.
838 *
839 * @throws InterruptedException {@inheritDoc}
840 * @throws NullPointerException {@inheritDoc}
841 */
842 public void put(E e) throws InterruptedException {
843 if (e == null) throw new NullPointerException();
844 if (transferer.transfer(e, false, 0) == null) {
845 Thread.interrupted();
846 throw new InterruptedException();
847 }
848 }
849
850 /**
851 * Inserts the specified element into this queue, waiting if necessary
852 * up to the specified wait time for another thread to receive it.
853 *
854 * @return {@code true} if successful, or {@code false} if the
855 * specified waiting time elapses before a consumer appears
856 * @throws InterruptedException {@inheritDoc}
857 * @throws NullPointerException {@inheritDoc}
858 */
859 public boolean offer(E e, long timeout, TimeUnit unit)
860 throws InterruptedException {
861 if (e == null) throw new NullPointerException();
862 if (transferer.transfer(e, true, unit.toNanos(timeout)) != null)
863 return true;
864 if (!Thread.interrupted())
865 return false;
866 throw new InterruptedException();
867 }
868
869 /**
870 * Inserts the specified element into this queue, if another thread is
871 * waiting to receive it.
872 *
873 * @param e the element to add
874 * @return {@code true} if the element was added to this queue, else
875 * {@code false}
876 * @throws NullPointerException if the specified element is null
877 */
878 public boolean offer(E e) {
879 if (e == null) throw new NullPointerException();
880 return transferer.transfer(e, true, 0) != null;
881 }
882
883 /**
884 * Retrieves and removes the head of this queue, waiting if necessary
885 * for another thread to insert it.
886 *
887 * @return the head of this queue
888 * @throws InterruptedException {@inheritDoc}
889 */
890 public E take() throws InterruptedException {
891 E e = transferer.transfer(null, false, 0);
892 if (e != null)
893 return e;
894 Thread.interrupted();
895 throw new InterruptedException();
896 }
897
898 /**
899 * Retrieves and removes the head of this queue, waiting
900 * if necessary up to the specified wait time, for another thread
901 * to insert it.
902 *
903 * @return the head of this queue, or {@code null} if the
904 * specified waiting time elapses before an element is present
905 * @throws InterruptedException {@inheritDoc}
906 */
907 public E poll(long timeout, TimeUnit unit) throws InterruptedException {
908 E e = transferer.transfer(null, true, unit.toNanos(timeout));
909 if (e != null || !Thread.interrupted())
910 return e;
911 throw new InterruptedException();
912 }
913
    /**
     * Retrieves and removes the head of this queue, if another thread
     * is currently making an element available.
     *
     * @return the head of this queue, or {@code null} if no
     *         element is available
     */
    public E poll() {
        // Timed transfer with zero timeout: an immediate attempt that
        // succeeds only if a producer is already waiting.
        return transferer.transfer(null, true, 0);
    }
924
    /**
     * Always returns {@code true}.
     * A {@code SynchronousQueue} has no internal capacity.
     *
     * @return {@code true}
     */
    public boolean isEmpty() {
        // Elements exist only during a handoff, never "in" the queue.
        return true;
    }
934
    /**
     * Always returns zero.
     * A {@code SynchronousQueue} has no internal capacity.
     *
     * @return zero
     */
    public int size() {
        // Degenerate Collection view: the queue never holds elements.
        return 0;
    }
944
    /**
     * Always returns zero.
     * A {@code SynchronousQueue} has no internal capacity.
     *
     * @return zero
     */
    public int remainingCapacity() {
        // No slots ever exist to fill, so remaining capacity is zero.
        return 0;
    }
954
    /**
     * Does nothing; a {@code SynchronousQueue} has no internal capacity,
     * so there is never anything to clear.
     */
    public void clear() {
    }
961
962 /**
963 * Always returns {@code false}.
964 * A {@code SynchronousQueue} has no internal capacity.
965 *
966 * @param o the element
967 * @return {@code false}
968 */
969 public boolean contains(Object o) {
970 return false;
971 }
972
973 /**
974 * Always returns {@code false}.
975 * A {@code SynchronousQueue} has no internal capacity.
976 *
977 * @param o the element to remove
978 * @return {@code false}
979 */
980 public boolean remove(Object o) {
981 return false;
982 }
983
984 /**
985 * Returns {@code false} unless the given collection is empty.
986 * A {@code SynchronousQueue} has no internal capacity.
987 *
988 * @param c the collection
989 * @return {@code false} unless given collection is empty
990 */
991 public boolean containsAll(Collection<?> c) {
992 return c.isEmpty();
993 }
994
995 /**
996 * Always returns {@code false}.
997 * A {@code SynchronousQueue} has no internal capacity.
998 *
999 * @param c the collection
1000 * @return {@code false}
1001 */
1002 public boolean removeAll(Collection<?> c) {
1003 return false;
1004 }
1005
1006 /**
1007 * Always returns {@code false}.
1008 * A {@code SynchronousQueue} has no internal capacity.
1009 *
1010 * @param c the collection
1011 * @return {@code false}
1012 */
1013 public boolean retainAll(Collection<?> c) {
1014 return false;
1015 }
1016
1017 /**
1018 * Always returns {@code null}.
1019 * A {@code SynchronousQueue} does not return elements
1020 * unless actively waited on.
1021 *
1022 * @return {@code null}
1023 */
1024 public E peek() {
1025 return null;
1026 }
1027
1028 /**
1029 * Returns an empty iterator in which {@code hasNext} always returns
1030 * {@code false}.
1031 *
1032 * @return an empty iterator
1033 */
1034 public Iterator<E> iterator() {
1035 return Collections.emptyIterator();
1036 }
1037
1038 /**
1039 * Returns an empty spliterator in which calls to
1040 * {@link Spliterator#trySplit() trySplit} always return {@code null}.
1041 *
1042 * @return an empty spliterator
1043 * @since 1.8
1044 */
1045 public Spliterator<E> spliterator() {
1046 return Spliterators.emptySpliterator();
1047 }
1048
1049 /**
1050 * Returns a zero-length array.
1051 * @return a zero-length array
1052 */
1053 public Object[] toArray() {
1054 return new Object[0];
1055 }
1056
1057 /**
1058 * Sets the zeroth element of the specified array to {@code null}
1059 * (if the array has non-zero length) and returns it.
1060 *
1061 * @param a the array
1062 * @return the specified array
1063 * @throws NullPointerException if the specified array is null
1064 */
1065 public <T> T[] toArray(T[] a) {
1066 if (a.length > 0)
1067 a[0] = null;
1068 return a;
1069 }
1070
1071 /**
1072 * Always returns {@code "[]"}.
1073 * @return {@code "[]"}
1074 */
1075 public String toString() {
1076 return "[]";
1077 }
1078
1079 /**
1080 * @throws UnsupportedOperationException {@inheritDoc}
1081 * @throws ClassCastException {@inheritDoc}
1082 * @throws NullPointerException {@inheritDoc}
1083 * @throws IllegalArgumentException {@inheritDoc}
1084 */
1085 public int drainTo(Collection<? super E> c) {
1086 Objects.requireNonNull(c);
1087 if (c == this)
1088 throw new IllegalArgumentException();
1089 int n = 0;
1090 for (E e; (e = poll()) != null; n++)
1091 c.add(e);
1092 return n;
1093 }
1094
1095 /**
1096 * @throws UnsupportedOperationException {@inheritDoc}
1097 * @throws ClassCastException {@inheritDoc}
1098 * @throws NullPointerException {@inheritDoc}
1099 * @throws IllegalArgumentException {@inheritDoc}
1100 */
1101 public int drainTo(Collection<? super E> c, int maxElements) {
1102 Objects.requireNonNull(c);
1103 if (c == this)
1104 throw new IllegalArgumentException();
1105 int n = 0;
1106 for (E e; n < maxElements && (e = poll()) != null; n++)
1107 c.add(e);
1108 return n;
1109 }
1110
1111 /*
1112 * To cope with serialization strategy in the 1.5 version of
1113 * SynchronousQueue, we declare some unused classes and fields
1114 * that exist solely to enable serializability across versions.
1115 * These fields are never used, so are initialized only if this
1116 * object is ever serialized or deserialized.
1117 */
1118
    // Base type of the legacy (1.5) serialized form; exists solely for
    // serialization compatibility and is never used operationally.
    @SuppressWarnings("serial")
    static class WaitQueue implements java.io.Serializable { }
    // Legacy serialized-form marker for an unfair (stack-based) queue.
    static class LifoWaitQueue extends WaitQueue {
        private static final long serialVersionUID = -3633113410248163686L;
    }
    // Legacy serialized-form marker for a fair (queue-based) queue.
    static class FifoWaitQueue extends WaitQueue {
        private static final long serialVersionUID = -3623113410248163686L;
    }
    // Legacy 1.5 serialized-form fields; assigned only in writeObject and
    // read back in readObject, never used at runtime.
    private ReentrantLock qlock;
    private WaitQueue waitingProducers;
    private WaitQueue waitingConsumers;
1130
    /**
     * Saves this queue to a stream (that is, serializes it), emitting the
     * legacy 1.5 serialized form so older readers can reconstitute an
     * equivalent queue.
     *
     * @param s the stream
     * @throws java.io.IOException if an I/O error occurs
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        // Fairness is implied by the transferer implementation: a
        // TransferQueue-based instance is fair, a TransferStack-based one is not.
        boolean fair = transferer instanceof TransferQueue;
        if (fair) {
            // Fair mode is encoded in the stream as Fifo wait queues
            // plus a fair lock.
            qlock = new ReentrantLock(true);
            waitingProducers = new FifoWaitQueue();
            waitingConsumers = new FifoWaitQueue();
        }
        else {
            // Unfair mode is encoded as Lifo wait queues plus an unfair lock.
            qlock = new ReentrantLock();
            waitingProducers = new LifoWaitQueue();
            waitingConsumers = new LifoWaitQueue();
        }
        s.defaultWriteObject();
    }
1151
    /**
     * Reconstitutes this queue from a stream (that is, deserializes it),
     * recovering fairness from the legacy serialized form.
     *
     * @param s the stream
     * @throws ClassNotFoundException if the class of a serialized object
     *         could not be found
     * @throws java.io.IOException if an I/O error occurs
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        s.defaultReadObject();
        // A FifoWaitQueue in the stream marks a fair instance (see
        // writeObject); choose the matching transferer implementation.
        if (waitingProducers instanceof FifoWaitQueue)
            transferer = new TransferQueue<E>();
        else
            transferer = new TransferStack<E>();
    }
1167
    static {
        // Reduce the risk of rare disastrous classloading in first call to
        // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
        // The local is intentionally unused; merely referencing the class
        // forces it to be loaded now.
        Class<?> ensureLoaded = LockSupport.class;
    }
1173 }