ViewVC Help
View File | Revision Log | Show Annotations | Download File | Root Listing
root/jsr166/jsr166/src/main/java/util/concurrent/SynchronousQueue.java
Revision: 1.96
Committed: Wed Mar 13 12:39:02 2013 UTC (11 years, 2 months ago) by dl
Branch: MAIN
Changes since 1.95: +1 -9 lines
Log Message:
Synch with lambda Spliterator API

File Contents

# Content
1 /*
2 * Written by Doug Lea, Bill Scherer, and Michael Scott with
3 * assistance from members of JCP JSR-166 Expert Group and released to
4 * the public domain, as explained at
5 * http://creativecommons.org/publicdomain/zero/1.0/
6 */
7
8 package java.util.concurrent;
9 import java.util.concurrent.locks.LockSupport;
10 import java.util.concurrent.locks.ReentrantLock;
11 import java.util.*;
12 import java.util.Spliterator;
13 import java.util.Spliterators;
14 import java.util.stream.Stream;
15 import java.util.stream.Streams;
16 import java.util.function.Consumer;
17
18 /**
19 * A {@linkplain BlockingQueue blocking queue} in which each insert
20 * operation must wait for a corresponding remove operation by another
21 * thread, and vice versa. A synchronous queue does not have any
22 * internal capacity, not even a capacity of one. You cannot
23 * {@code peek} at a synchronous queue because an element is only
24 * present when you try to remove it; you cannot insert an element
25 * (using any method) unless another thread is trying to remove it;
26 * you cannot iterate as there is nothing to iterate. The
27 * <em>head</em> of the queue is the element that the first queued
28 * inserting thread is trying to add to the queue; if there is no such
29 * queued thread then no element is available for removal and
30 * {@code poll()} will return {@code null}. For purposes of other
31 * {@code Collection} methods (for example {@code contains}), a
32 * {@code SynchronousQueue} acts as an empty collection. This queue
33 * does not permit {@code null} elements.
34 *
35 * <p>Synchronous queues are similar to rendezvous channels used in
36 * CSP and Ada. They are well suited for handoff designs, in which an
37 * object running in one thread must sync up with an object running
38 * in another thread in order to hand it some information, event, or
39 * task.
40 *
41 * <p>This class supports an optional fairness policy for ordering
42 * waiting producer and consumer threads. By default, this ordering
43 * is not guaranteed. However, a queue constructed with fairness set
44 * to {@code true} grants threads access in FIFO order.
45 *
46 * <p>This class and its iterator implement all of the
47 * <em>optional</em> methods of the {@link Collection} and {@link
48 * Iterator} interfaces.
49 *
50 * <p>This class is a member of the
51 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
52 * Java Collections Framework</a>.
53 *
54 * @since 1.5
55 * @author Doug Lea and Bill Scherer and Michael Scott
56 * @param <E> the type of elements held in this collection
57 */
58 public class SynchronousQueue<E> extends AbstractQueue<E>
59 implements BlockingQueue<E>, java.io.Serializable {
60 private static final long serialVersionUID = -3223113410248163686L;
61
62 /*
63 * This class implements extensions of the dual stack and dual
64 * queue algorithms described in "Nonblocking Concurrent Objects
65 * with Condition Synchronization", by W. N. Scherer III and
66 * M. L. Scott. 18th Annual Conf. on Distributed Computing,
67 * Oct. 2004 (see also
68 * http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/duals.html).
69 * The (Lifo) stack is used for non-fair mode, and the (Fifo)
70 * queue for fair mode. The performance of the two is generally
71 * similar. Fifo usually supports higher throughput under
72 * contention but Lifo maintains higher thread locality in common
73 * applications.
74 *
75 * A dual queue (and similarly stack) is one that at any given
76 * time either holds "data" -- items provided by put operations,
77 * or "requests" -- slots representing take operations, or is
78 * empty. A call to "fulfill" (i.e., a call requesting an item
79 * from a queue holding data or vice versa) dequeues a
80 * complementary node. The most interesting feature of these
81 * queues is that any operation can figure out which mode the
82 * queue is in, and act accordingly without needing locks.
83 *
84 * Both the queue and stack extend abstract class Transferer
85 * defining the single method transfer that does a put or a
86 * take. These are unified into a single method because in dual
87 * data structures, the put and take operations are symmetrical,
88 * so nearly all code can be combined. The resulting transfer
89 * methods are on the long side, but are easier to follow than
90 * they would be if broken up into nearly-duplicated parts.
91 *
92 * The queue and stack data structures share many conceptual
93 * similarities but very few concrete details. For simplicity,
94 * they are kept distinct so that they can later evolve
95 * separately.
96 *
97 * The algorithms here differ from the versions in the above paper
98 * in extending them for use in synchronous queues, as well as
99 * dealing with cancellation. The main differences include:
100 *
101 * 1. The original algorithms used bit-marked pointers, but
102 * the ones here use mode bits in nodes, leading to a number
103 * of further adaptations.
104 * 2. SynchronousQueues must block threads waiting to become
105 * fulfilled.
106 * 3. Support for cancellation via timeout and interrupts,
107 * including cleaning out cancelled nodes/threads
108 * from lists to avoid garbage retention and memory depletion.
109 *
110 * Blocking is mainly accomplished using LockSupport park/unpark,
111 * except that nodes that appear to be the next ones to become
112 * fulfilled first spin a bit (on multiprocessors only). On very
113 * busy synchronous queues, spinning can dramatically improve
114 * throughput. And on less busy ones, the amount of spinning is
115 * small enough not to be noticeable.
116 *
117 * Cleaning is done in different ways in queues vs stacks. For
118 * queues, we can almost always remove a node immediately in O(1)
119 * time (modulo retries for consistency checks) when it is
120 * cancelled. But if it may be pinned as the current tail, it must
121 * wait until some subsequent cancellation. For stacks, we need a
122 * potentially O(n) traversal to be sure that we can remove the
123 * node, but this can run concurrently with other threads
124 * accessing the stack.
125 *
126 * While garbage collection takes care of most node reclamation
127 * issues that otherwise complicate nonblocking algorithms, care
128 * is taken to "forget" references to data, other nodes, and
129 * threads that might be held on to long-term by blocked
130 * threads. In cases where setting to null would otherwise
131 * conflict with main algorithms, this is done by changing a
132 * node's link to now point to the node itself. This doesn't arise
133 * much for Stack nodes (because blocked threads do not hang on to
134 * old head pointers), but references in Queue nodes must be
135 * aggressively forgotten to avoid reachability of everything any
136 * node has ever referred to since arrival.
137 */
138
    /**
     * Shared internal API for dual stacks and queues.
     * Concrete implementations below are TransferStack (unfair/LIFO
     * mode) and TransferQueue (fair/FIFO mode); every public queue
     * operation reduces to a single call to transfer.
     */
    abstract static class Transferer<E> {
        /**
         * Performs a put or take.
         *
         * @param e if non-null, the item to be handed to a consumer;
         *          if null, requests that transfer return an item
         *          offered by producer.
         * @param timed if this operation should timeout
         * @param nanos the timeout, in nanoseconds
         * @return if non-null, the item provided or received; if null,
         *         the operation failed due to timeout or interrupt --
         *         the caller can distinguish which of these occurred
         *         by checking Thread.interrupted.
         */
        abstract E transfer(E e, boolean timed, long nanos);
    }
158
    /** The number of CPUs, for spin control */
    static final int NCPUS = Runtime.getRuntime().availableProcessors();

    /**
     * The number of times to spin before blocking in timed waits.
     * The value is empirically derived -- it works well across a
     * variety of processors and OSes. Empirically, the best value
     * seems not to vary with number of CPUs (beyond 2) so is just
     * a constant.
     */
    // Zero on uniprocessors: spinning cannot help when the fulfilling
    // thread can't run concurrently.
    static final int maxTimedSpins = (NCPUS < 2) ? 0 : 32;

    /**
     * The number of times to spin before blocking in untimed waits.
     * This is greater than timed value because untimed waits spin
     * faster since they don't need to check times on each spin.
     */
    static final int maxUntimedSpins = maxTimedSpins * 16;

    /**
     * The number of nanoseconds for which it is faster to spin
     * rather than to use timed park. A rough estimate suffices.
     */
    static final long spinForTimeoutThreshold = 1000L;
183
    /** Dual stack */
    static final class TransferStack<E> extends Transferer<E> {
        /*
         * This extends Scherer-Scott dual stack algorithm, differing,
         * among other ways, by using "covering" nodes rather than
         * bit-marked pointers: Fulfilling operations push on marker
         * nodes (with FULFILLING bit set in mode) to reserve a spot
         * to match a waiting node.
         */

        /* Modes for SNodes, ORed together in node fields */
        /** Node represents an unfulfilled consumer */
        static final int REQUEST = 0;
        /** Node represents an unfulfilled producer */
        static final int DATA = 1;
        /** Node is fulfilling another unfulfilled DATA or REQUEST */
        static final int FULFILLING = 2;

        /** Returns true if m has fulfilling bit set. */
        static boolean isFulfilling(int m) { return (m & FULFILLING) != 0; }

        /** Node class for TransferStacks. */
        static final class SNode {
            volatile SNode next;        // next node in stack
            volatile SNode match;       // the node matched to this
            volatile Thread waiter;     // to control park/unpark
            Object item;                // data; or null for REQUESTs
            int mode;
            // Note: item and mode fields don't need to be volatile
            // since they are always written before, and read after,
            // other volatile/atomic operations.

            SNode(Object item) {
                this.item = item;
            }

            /** CASes next from cmp to val; the cheap == pre-check
             *  avoids the CAS when next has already changed. */
            boolean casNext(SNode cmp, SNode val) {
                return cmp == next &&
                    UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
            }

            /**
             * Tries to match node s to this node, if so, waking up thread.
             * Fulfillers call tryMatch to identify their waiters.
             * Waiters block until they have been matched.
             *
             * @param s the node to match
             * @return true if successfully matched to s
             */
            boolean tryMatch(SNode s) {
                if (match == null &&
                    UNSAFE.compareAndSwapObject(this, matchOffset, null, s)) {
                    Thread w = waiter;
                    if (w != null) {    // waiters need at most one unpark
                        waiter = null;
                        LockSupport.unpark(w);
                    }
                    return true;
                }
                // Lost the CAS: succeed anyway if some other thread
                // already matched us to the same node s.
                return match == s;
            }

            /**
             * Tries to cancel a wait by matching node to itself.
             * A self-match is the cancellation marker tested by
             * isCancelled.
             */
            void tryCancel() {
                UNSAFE.compareAndSwapObject(this, matchOffset, null, this);
            }

            boolean isCancelled() {
                return match == this;
            }

            // Unsafe mechanics
            private static final sun.misc.Unsafe UNSAFE;
            private static final long matchOffset;
            private static final long nextOffset;

            static {
                try {
                    UNSAFE = sun.misc.Unsafe.getUnsafe();
                    Class<?> k = SNode.class;
                    matchOffset = UNSAFE.objectFieldOffset
                        (k.getDeclaredField("match"));
                    nextOffset = UNSAFE.objectFieldOffset
                        (k.getDeclaredField("next"));
                } catch (Exception e) {
                    throw new Error(e);
                }
            }
        }

        /** The head (top) of the stack */
        volatile SNode head;

        /** CASes head from h to nh; the == pre-check skips the CAS
         *  when head has already moved. */
        boolean casHead(SNode h, SNode nh) {
            return h == head &&
                UNSAFE.compareAndSwapObject(this, headOffset, h, nh);
        }

        /**
         * Creates or resets fields of a node. Called only from transfer
         * where the node to push on stack is lazily created and
         * reused when possible to help reduce intervals between reads
         * and CASes of head and to avoid surges of garbage when CASes
         * to push nodes fail due to contention.
         */
        static SNode snode(SNode s, Object e, SNode next, int mode) {
            if (s == null) s = new SNode(e);
            s.mode = mode;
            s.next = next;
            return s;
        }

        /**
         * Puts or takes an item.
         */
        @SuppressWarnings("unchecked")
        E transfer(E e, boolean timed, long nanos) {
            /*
             * Basic algorithm is to loop trying one of three actions:
             *
             * 1. If apparently empty or already containing nodes of same
             *    mode, try to push node on stack and wait for a match,
             *    returning it, or null if cancelled.
             *
             * 2. If apparently containing node of complementary mode,
             *    try to push a fulfilling node on to stack, match
             *    with corresponding waiting node, pop both from
             *    stack, and return matched item. The matching or
             *    unlinking might not actually be necessary because of
             *    other threads performing action 3:
             *
             * 3. If top of stack already holds another fulfilling node,
             *    help it out by doing its match and/or pop
             *    operations, and then continue. The code for helping
             *    is essentially the same as for fulfilling, except
             *    that it doesn't return the item.
             */

            SNode s = null; // constructed/reused as needed
            int mode = (e == null) ? REQUEST : DATA;

            for (;;) {
                SNode h = head;
                if (h == null || h.mode == mode) {  // empty or same-mode
                    if (timed && nanos <= 0) {      // can't wait
                        if (h != null && h.isCancelled())
                            casHead(h, h.next);     // pop cancelled node
                        else
                            return null;
                    } else if (casHead(h, s = snode(s, e, h, mode))) {
                        SNode m = awaitFulfill(s, timed, nanos);
                        if (m == s) {               // wait was cancelled
                            clean(s);
                            return null;
                        }
                        if ((h = head) != null && h.next == s)
                            casHead(h, s.next);     // help s's fulfiller
                        // For a REQUEST the fulfiller's node m carries the
                        // data; for a DATA mode transfer return our own item.
                        return (E) ((mode == REQUEST) ? m.item : s.item);
                    }
                } else if (!isFulfilling(h.mode)) { // try to fulfill
                    if (h.isCancelled())            // already cancelled
                        casHead(h, h.next);         // pop and retry
                    else if (casHead(h, s=snode(s, e, h, FULFILLING|mode))) {
                        for (;;) { // loop until matched or waiters disappear
                            SNode m = s.next;       // m is s's match
                            if (m == null) {        // all waiters are gone
                                casHead(s, null);   // pop fulfill node
                                s = null;           // use new node next time
                                break;              // restart main loop
                            }
                            SNode mn = m.next;
                            if (m.tryMatch(s)) {
                                casHead(s, mn);     // pop both s and m
                                return (E) ((mode == REQUEST) ? m.item : s.item);
                            } else                  // lost match
                                s.casNext(m, mn);   // help unlink
                        }
                    }
                } else {                            // help a fulfiller
                    SNode m = h.next;               // m is h's match
                    if (m == null)                  // waiter is gone
                        casHead(h, null);           // pop fulfilling node
                    else {
                        SNode mn = m.next;
                        if (m.tryMatch(h))          // help match
                            casHead(h, mn);         // pop both h and m
                        else                        // lost match
                            h.casNext(m, mn);       // help unlink
                    }
                }
            }
        }

        /**
         * Spins/blocks until node s is matched by a fulfill operation.
         *
         * @param s the waiting node
         * @param timed true if timed wait
         * @param nanos timeout value
         * @return matched node, or s if cancelled
         */
        SNode awaitFulfill(SNode s, boolean timed, long nanos) {
            /*
             * When a node/thread is about to block, it sets its waiter
             * field and then rechecks state at least one more time
             * before actually parking, thus covering race vs
             * fulfiller noticing that waiter is non-null so should be
             * woken.
             *
             * When invoked by nodes that appear at the point of call
             * to be at the head of the stack, calls to park are
             * preceded by spins to avoid blocking when producers and
             * consumers are arriving very close in time. This can
             * happen enough to bother only on multiprocessors.
             *
             * The order of checks for returning out of main loop
             * reflects fact that interrupts have precedence over
             * normal returns, which have precedence over
             * timeouts. (So, on timeout, one last check for match is
             * done before giving up.) Except that calls from untimed
             * SynchronousQueue.{poll/offer} don't check interrupts
             * and don't wait at all, so are trapped in transfer
             * method rather than calling awaitFulfill.
             */
            final long deadline = timed ? System.nanoTime() + nanos : 0L;
            Thread w = Thread.currentThread();
            int spins = (shouldSpin(s) ?
                         (timed ? maxTimedSpins : maxUntimedSpins) : 0);
            for (;;) {
                if (w.isInterrupted())
                    s.tryCancel();
                SNode m = s.match;
                if (m != null)
                    return m;
                if (timed) {
                    nanos = deadline - System.nanoTime();
                    if (nanos <= 0L) {
                        s.tryCancel();
                        continue;   // recheck match before giving up
                    }
                }
                if (spins > 0)
                    spins = shouldSpin(s) ? (spins-1) : 0;
                else if (s.waiter == null)
                    s.waiter = w; // establish waiter so can park next iter
                else if (!timed)
                    LockSupport.park(this);
                else if (nanos > spinForTimeoutThreshold)
                    LockSupport.parkNanos(this, nanos);
            }
        }

        /**
         * Returns true if node s is at head or there is an active
         * fulfiller.
         */
        boolean shouldSpin(SNode s) {
            SNode h = head;
            return (h == s || h == null || isFulfilling(h.mode));
        }

        /**
         * Unlinks s from the stack.
         */
        void clean(SNode s) {
            s.item = null;   // forget item
            s.waiter = null; // forget thread

            /*
             * At worst we may need to traverse entire stack to unlink
             * s. If there are multiple concurrent calls to clean, we
             * might not see s if another thread has already removed
             * it. But we can stop when we see any node known to
             * follow s. We use s.next unless it too is cancelled, in
             * which case we try the node one past. We don't check any
             * further because we don't want to doubly traverse just to
             * find sentinel.
             */

            SNode past = s.next;
            if (past != null && past.isCancelled())
                past = past.next;

            // Absorb cancelled nodes at head
            SNode p;
            while ((p = head) != null && p != past && p.isCancelled())
                casHead(p, p.next);

            // Unsplice embedded nodes
            while (p != null && p != past) {
                SNode n = p.next;
                if (n != null && n.isCancelled())
                    p.casNext(n, n.next);
                else
                    p = n;
            }
        }

        // Unsafe mechanics
        private static final sun.misc.Unsafe UNSAFE;
        private static final long headOffset;
        static {
            try {
                UNSAFE = sun.misc.Unsafe.getUnsafe();
                Class<?> k = TransferStack.class;
                headOffset = UNSAFE.objectFieldOffset
                    (k.getDeclaredField("head"));
            } catch (Exception e) {
                throw new Error(e);
            }
        }
    }
498
    /** Dual Queue */
    static final class TransferQueue<E> extends Transferer<E> {
        /*
         * This extends Scherer-Scott dual queue algorithm, differing,
         * among other ways, by using modes within nodes rather than
         * marked pointers. The algorithm is a little simpler than
         * that for stacks because fulfillers do not need explicit
         * nodes, and matching is done by CAS'ing QNode.item field
         * from non-null to null (for put) or vice versa (for take).
         */

        /** Node class for TransferQueue. */
        static final class QNode {
            volatile QNode next;          // next node in queue
            volatile Object item;         // CAS'ed to or from null
            volatile Thread waiter;       // to control park/unpark
            final boolean isData;         // true for data (put) nodes

            QNode(Object item, boolean isData) {
                this.item = item;
                this.isData = isData;
            }

            /** CASes next from cmp to val; the == pre-check skips
             *  the CAS when next has already changed. */
            boolean casNext(QNode cmp, QNode val) {
                return next == cmp &&
                    UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
            }

            /** CASes item from cmp to val; this is how fulfillment
             *  (and cancellation) is published. */
            boolean casItem(Object cmp, Object val) {
                return item == cmp &&
                    UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
            }

            /**
             * Tries to cancel by CAS'ing ref to this as item.
             */
            void tryCancel(Object cmp) {
                UNSAFE.compareAndSwapObject(this, itemOffset, cmp, this);
            }

            boolean isCancelled() {
                return item == this;
            }

            /**
             * Returns true if this node is known to be off the queue
             * because its next pointer has been forgotten due to
             * an advanceHead operation.
             */
            boolean isOffList() {
                return next == this;
            }

            // Unsafe mechanics
            private static final sun.misc.Unsafe UNSAFE;
            private static final long itemOffset;
            private static final long nextOffset;

            static {
                try {
                    UNSAFE = sun.misc.Unsafe.getUnsafe();
                    Class<?> k = QNode.class;
                    itemOffset = UNSAFE.objectFieldOffset
                        (k.getDeclaredField("item"));
                    nextOffset = UNSAFE.objectFieldOffset
                        (k.getDeclaredField("next"));
                } catch (Exception e) {
                    throw new Error(e);
                }
            }
        }

        /** Head of queue */
        transient volatile QNode head;
        /** Tail of queue */
        transient volatile QNode tail;
        /**
         * Reference to a cancelled node that might not yet have been
         * unlinked from queue because it was the last inserted node
         * when it was cancelled.
         */
        transient volatile QNode cleanMe;

        TransferQueue() {
            QNode h = new QNode(null, false); // initialize to dummy node.
            head = h;
            tail = h;
        }

        /**
         * Tries to cas nh as new head; if successful, unlink
         * old head's next node to avoid garbage retention.
         */
        void advanceHead(QNode h, QNode nh) {
            if (h == head &&
                UNSAFE.compareAndSwapObject(this, headOffset, h, nh))
                h.next = h; // forget old next
        }

        /**
         * Tries to cas nt as new tail.
         */
        void advanceTail(QNode t, QNode nt) {
            if (tail == t)
                UNSAFE.compareAndSwapObject(this, tailOffset, t, nt);
        }

        /**
         * Tries to CAS cleanMe slot.
         */
        boolean casCleanMe(QNode cmp, QNode val) {
            return cleanMe == cmp &&
                UNSAFE.compareAndSwapObject(this, cleanMeOffset, cmp, val);
        }

        /**
         * Puts or takes an item.
         */
        @SuppressWarnings("unchecked")
        E transfer(E e, boolean timed, long nanos) {
            /* Basic algorithm is to loop trying to take either of
             * two actions:
             *
             * 1. If queue apparently empty or holding same-mode nodes,
             *    try to add node to queue of waiters, wait to be
             *    fulfilled (or cancelled) and return matching item.
             *
             * 2. If queue apparently contains waiting items, and this
             *    call is of complementary mode, try to fulfill by CAS'ing
             *    item field of waiting node and dequeuing it, and then
             *    returning matching item.
             *
             * In each case, along the way, check for and try to help
             * advance head and tail on behalf of other stalled/slow
             * threads.
             *
             * The loop starts off with a null check guarding against
             * seeing uninitialized head or tail values. This never
             * happens in current SynchronousQueue, but could if
             * callers held non-volatile/final ref to the
             * transferer. The check is here anyway because it places
             * null checks at top of loop, which is usually faster
             * than having them implicitly interspersed.
             */

            QNode s = null; // constructed/reused as needed
            boolean isData = (e != null);

            for (;;) {
                QNode t = tail;
                QNode h = head;
                if (t == null || h == null)         // saw uninitialized value
                    continue;                       // spin

                if (h == t || t.isData == isData) { // empty or same-mode
                    QNode tn = t.next;
                    if (t != tail)                  // inconsistent read
                        continue;
                    if (tn != null) {               // lagging tail
                        advanceTail(t, tn);
                        continue;
                    }
                    if (timed && nanos <= 0)        // can't wait
                        return null;
                    if (s == null)
                        s = new QNode(e, isData);
                    if (!t.casNext(null, s))        // failed to link in
                        continue;

                    advanceTail(t, s);              // swing tail and wait
                    Object x = awaitFulfill(s, e, timed, nanos);
                    if (x == s) {                   // wait was cancelled
                        clean(t, s);
                        return null;
                    }

                    if (!s.isOffList()) {           // not already unlinked
                        advanceHead(t, s);          // unlink if head
                        if (x != null)              // and forget fields
                            s.item = s;
                        s.waiter = null;
                    }
                    // take: x is the received item; put: return own e
                    return (x != null) ? (E)x : e;

                } else {                            // complementary-mode
                    QNode m = h.next;               // node to fulfill
                    if (t != tail || m == null || h != head)
                        continue;                   // inconsistent read

                    Object x = m.item;
                    if (isData == (x != null) ||    // m already fulfilled
                        x == m ||                   // m cancelled
                        !m.casItem(x, e)) {         // lost CAS
                        advanceHead(h, m);          // dequeue and retry
                        continue;
                    }

                    advanceHead(h, m);              // successfully fulfilled
                    LockSupport.unpark(m.waiter);
                    return (x != null) ? (E)x : e;
                }
            }
        }

        /**
         * Spins/blocks until node s is fulfilled.
         *
         * @param s the waiting node
         * @param e the comparison value for checking match
         * @param timed true if timed wait
         * @param nanos timeout value
         * @return matched item, or s if cancelled
         */
        Object awaitFulfill(QNode s, E e, boolean timed, long nanos) {
            /* Same idea as TransferStack.awaitFulfill */
            final long deadline = timed ? System.nanoTime() + nanos : 0L;
            Thread w = Thread.currentThread();
            int spins = ((head.next == s) ?
                         (timed ? maxTimedSpins : maxUntimedSpins) : 0);
            for (;;) {
                if (w.isInterrupted())
                    s.tryCancel(e);
                Object x = s.item;
                if (x != e)         // item changed: fulfilled or cancelled
                    return x;
                if (timed) {
                    nanos = deadline - System.nanoTime();
                    if (nanos <= 0L) {
                        s.tryCancel(e);
                        continue;   // recheck item before giving up
                    }
                }
                if (spins > 0)
                    --spins;
                else if (s.waiter == null)
                    s.waiter = w;
                else if (!timed)
                    LockSupport.park(this);
                else if (nanos > spinForTimeoutThreshold)
                    LockSupport.parkNanos(this, nanos);
            }
        }

        /**
         * Gets rid of cancelled node s with original predecessor pred.
         */
        void clean(QNode pred, QNode s) {
            s.waiter = null; // forget thread
            /*
             * At any given time, exactly one node on list cannot be
             * deleted -- the last inserted node. To accommodate this,
             * if we cannot delete s, we save its predecessor as
             * "cleanMe", deleting the previously saved version
             * first. At least one of node s or the node previously
             * saved can always be deleted, so this always terminates.
             */
            while (pred.next == s) { // Return early if already unlinked
                QNode h = head;
                QNode hn = h.next;   // Absorb cancelled first node as head
                if (hn != null && hn.isCancelled()) {
                    advanceHead(h, hn);
                    continue;
                }
                QNode t = tail;      // Ensure consistent read for tail
                if (t == h)
                    return;
                QNode tn = t.next;
                if (t != tail)
                    continue;
                if (tn != null) {
                    advanceTail(t, tn);
                    continue;
                }
                if (s != t) {        // If not tail, try to unsplice
                    QNode sn = s.next;
                    if (sn == s || pred.casNext(s, sn))
                        return;
                }
                QNode dp = cleanMe;
                if (dp != null) {    // Try unlinking previous cancelled node
                    QNode d = dp.next;
                    QNode dn;
                    if (d == null ||               // d is gone or
                        d == dp ||                 // d is off list or
                        !d.isCancelled() ||        // d not cancelled or
                        (d != t &&                 // d not tail and
                         (dn = d.next) != null &&  //   has successor
                         dn != d &&                //   that is on list
                         dp.casNext(d, dn)))       // d unspliced
                        casCleanMe(dp, null);
                    if (dp == pred)
                        return;      // s is already saved node
                } else if (casCleanMe(null, pred))
                    return;          // Postpone cleaning s
            }
        }

        private static final sun.misc.Unsafe UNSAFE;
        private static final long headOffset;
        private static final long tailOffset;
        private static final long cleanMeOffset;
        static {
            try {
                UNSAFE = sun.misc.Unsafe.getUnsafe();
                Class<?> k = TransferQueue.class;
                headOffset = UNSAFE.objectFieldOffset
                    (k.getDeclaredField("head"));
                tailOffset = UNSAFE.objectFieldOffset
                    (k.getDeclaredField("tail"));
                cleanMeOffset = UNSAFE.objectFieldOffset
                    (k.getDeclaredField("cleanMe"));
            } catch (Exception e) {
                throw new Error(e);
            }
        }
    }
815
    /**
     * The transferer. Set only in constructor, but cannot be declared
     * as final without further complicating serialization. Since
     * this is accessed only at most once per public method, there
     * isn't a noticeable performance penalty for using volatile
     * instead of final here.
     * Holds a TransferQueue in fair mode, a TransferStack otherwise.
     */
    private transient volatile Transferer<E> transferer;
824
    /**
     * Creates a {@code SynchronousQueue} with nonfair access policy.
     * Equivalent to {@code SynchronousQueue(false)}.
     */
    public SynchronousQueue() {
        this(false);
    }
831
832 /**
833 * Creates a {@code SynchronousQueue} with the specified fairness policy.
834 *
835 * @param fair if true, waiting threads contend in FIFO order for
836 * access; otherwise the order is unspecified.
837 */
838 public SynchronousQueue(boolean fair) {
839 transferer = fair ? new TransferQueue<E>() : new TransferStack<E>();
840 }
841
842 /**
843 * Adds the specified element to this queue, waiting if necessary for
844 * another thread to receive it.
845 *
846 * @throws InterruptedException {@inheritDoc}
847 * @throws NullPointerException {@inheritDoc}
848 */
849 public void put(E e) throws InterruptedException {
850 if (e == null) throw new NullPointerException();
851 if (transferer.transfer(e, false, 0) == null) {
852 Thread.interrupted();
853 throw new InterruptedException();
854 }
855 }
856
857 /**
858 * Inserts the specified element into this queue, waiting if necessary
859 * up to the specified wait time for another thread to receive it.
860 *
861 * @return {@code true} if successful, or {@code false} if the
862 * specified waiting time elapses before a consumer appears
863 * @throws InterruptedException {@inheritDoc}
864 * @throws NullPointerException {@inheritDoc}
865 */
866 public boolean offer(E e, long timeout, TimeUnit unit)
867 throws InterruptedException {
868 if (e == null) throw new NullPointerException();
869 if (transferer.transfer(e, true, unit.toNanos(timeout)) != null)
870 return true;
871 if (!Thread.interrupted())
872 return false;
873 throw new InterruptedException();
874 }
875
876 /**
877 * Inserts the specified element into this queue, if another thread is
878 * waiting to receive it.
879 *
880 * @param e the element to add
881 * @return {@code true} if the element was added to this queue, else
882 * {@code false}
883 * @throws NullPointerException if the specified element is null
884 */
885 public boolean offer(E e) {
886 if (e == null) throw new NullPointerException();
887 return transferer.transfer(e, true, 0) != null;
888 }
889
890 /**
891 * Retrieves and removes the head of this queue, waiting if necessary
892 * for another thread to insert it.
893 *
894 * @return the head of this queue
895 * @throws InterruptedException {@inheritDoc}
896 */
897 public E take() throws InterruptedException {
898 E e = transferer.transfer(null, false, 0);
899 if (e != null)
900 return e;
901 Thread.interrupted();
902 throw new InterruptedException();
903 }
904
905 /**
906 * Retrieves and removes the head of this queue, waiting
907 * if necessary up to the specified wait time, for another thread
908 * to insert it.
909 *
910 * @return the head of this queue, or {@code null} if the
911 * specified waiting time elapses before an element is present
912 * @throws InterruptedException {@inheritDoc}
913 */
914 public E poll(long timeout, TimeUnit unit) throws InterruptedException {
915 E e = transferer.transfer(null, true, unit.toNanos(timeout));
916 if (e != null || !Thread.interrupted())
917 return e;
918 throw new InterruptedException();
919 }
920
    /**
     * Retrieves and removes the head of this queue, if another thread
     * is currently making an element available.
     *
     * @return the head of this queue, or {@code null} if no
     *         element is available
     */
    public E poll() {
        // Timed transfer with zero timeout: never blocks.
        return transferer.transfer(null, true, 0);
    }
931
    /**
     * Always returns {@code true}.
     * A {@code SynchronousQueue} has no internal capacity.
     *
     * @return {@code true}
     */
    public boolean isEmpty() {
        return true;    // elements exist only during a handoff
    }
941
    /**
     * Always returns zero.
     * A {@code SynchronousQueue} has no internal capacity.
     *
     * @return zero
     */
    public int size() {
        return 0;   // never holds elements
    }
951
952 /**
953 * Always returns zero.
954 * A {@code SynchronousQueue} has no internal capacity.
955 *
956 * @return zero
957 */
958 public int remainingCapacity() {
959 return 0;
960 }
961
962 /**
963 * Does nothing.
964 * A {@code SynchronousQueue} has no internal capacity.
965 */
966 public void clear() {
967 }
968
969 /**
970 * Always returns {@code false}.
971 * A {@code SynchronousQueue} has no internal capacity.
972 *
973 * @param o the element
974 * @return {@code false}
975 */
976 public boolean contains(Object o) {
977 return false;
978 }
979
980 /**
981 * Always returns {@code false}.
982 * A {@code SynchronousQueue} has no internal capacity.
983 *
984 * @param o the element to remove
985 * @return {@code false}
986 */
987 public boolean remove(Object o) {
988 return false;
989 }
990
991 /**
992 * Returns {@code false} unless the given collection is empty.
993 * A {@code SynchronousQueue} has no internal capacity.
994 *
995 * @param c the collection
996 * @return {@code false} unless given collection is empty
997 */
998 public boolean containsAll(Collection<?> c) {
999 return c.isEmpty();
1000 }
1001
1002 /**
1003 * Always returns {@code false}.
1004 * A {@code SynchronousQueue} has no internal capacity.
1005 *
1006 * @param c the collection
1007 * @return {@code false}
1008 */
1009 public boolean removeAll(Collection<?> c) {
1010 return false;
1011 }
1012
1013 /**
1014 * Always returns {@code false}.
1015 * A {@code SynchronousQueue} has no internal capacity.
1016 *
1017 * @param c the collection
1018 * @return {@code false}
1019 */
1020 public boolean retainAll(Collection<?> c) {
1021 return false;
1022 }
1023
1024 /**
1025 * Always returns {@code null}.
1026 * A {@code SynchronousQueue} does not return elements
1027 * unless actively waited on.
1028 *
1029 * @return {@code null}
1030 */
1031 public E peek() {
1032 return null;
1033 }
1034
1035 /**
1036 * Returns an empty iterator in which {@code hasNext} always returns
1037 * {@code false}.
1038 *
1039 * @return an empty iterator
1040 */
1041 @SuppressWarnings("unchecked")
1042 public Iterator<E> iterator() {
1043 return (Iterator<E>) EmptyIterator.EMPTY_ITERATOR;
1044 }
1045
    // Replicated from a previous version of java.util.Collections: a
    // stateless shared empty Iterator. hasNext() is always false, next()
    // always fails, and remove() always throws IllegalStateException
    // because next() can never have succeeded.
    private static class EmptyIterator<E> implements Iterator<E> {
        static final EmptyIterator<Object> EMPTY_ITERATOR
            = new EmptyIterator<Object>();

        public boolean hasNext() { return false; }
        public E next() { throw new NoSuchElementException(); }
        public void remove() { throw new IllegalStateException(); }
    }
1055
    /**
     * Returns an empty spliterator in which calls to
     * {@code trySplit} always return {@code null}.
     *
     * @return an empty spliterator
     */
    public Spliterator<E> spliterator() {
        return Spliterators.emptySpliterator();
    }
1059
1060 /**
1061 * Returns a zero-length array.
1062 * @return a zero-length array
1063 */
1064 public Object[] toArray() {
1065 return new Object[0];
1066 }
1067
1068 /**
1069 * Sets the zeroeth element of the specified array to {@code null}
1070 * (if the array has non-zero length) and returns it.
1071 *
1072 * @param a the array
1073 * @return the specified array
1074 * @throws NullPointerException if the specified array is null
1075 */
1076 public <T> T[] toArray(T[] a) {
1077 if (a.length > 0)
1078 a[0] = null;
1079 return a;
1080 }
1081
1082 /**
1083 * @throws UnsupportedOperationException {@inheritDoc}
1084 * @throws ClassCastException {@inheritDoc}
1085 * @throws NullPointerException {@inheritDoc}
1086 * @throws IllegalArgumentException {@inheritDoc}
1087 */
1088 public int drainTo(Collection<? super E> c) {
1089 if (c == null)
1090 throw new NullPointerException();
1091 if (c == this)
1092 throw new IllegalArgumentException();
1093 int n = 0;
1094 for (E e; (e = poll()) != null;) {
1095 c.add(e);
1096 ++n;
1097 }
1098 return n;
1099 }
1100
1101 /**
1102 * @throws UnsupportedOperationException {@inheritDoc}
1103 * @throws ClassCastException {@inheritDoc}
1104 * @throws NullPointerException {@inheritDoc}
1105 * @throws IllegalArgumentException {@inheritDoc}
1106 */
1107 public int drainTo(Collection<? super E> c, int maxElements) {
1108 if (c == null)
1109 throw new NullPointerException();
1110 if (c == this)
1111 throw new IllegalArgumentException();
1112 int n = 0;
1113 for (E e; n < maxElements && (e = poll()) != null;) {
1114 c.add(e);
1115 ++n;
1116 }
1117 return n;
1118 }
1119
1120 /*
1121 * To cope with serialization strategy in the 1.5 version of
1122 * SynchronousQueue, we declare some unused classes and fields
1123 * that exist solely to enable serializability across versions.
1124 * These fields are never used, so are initialized only if this
1125 * object is ever serialized or deserialized.
1126 */
1127
    // Marker base class for the 1.5 serial form; it carries no state.
    @SuppressWarnings("serial")
    static class WaitQueue implements java.io.Serializable { }
    // Written when the queue is unfair (stack-based); its presence in a
    // stream tells readObject which transferer to reconstruct.
    static class LifoWaitQueue extends WaitQueue {
        private static final long serialVersionUID = -3633113410248163686L;
    }
    // Written when the queue is fair (queue-based).
    static class FifoWaitQueue extends WaitQueue {
        private static final long serialVersionUID = -3623113410248163686L;
    }
    // Fields from the 1.5 serial form; never read at runtime, assigned
    // only in writeObject/readObject to keep streams cross-version.
    private ReentrantLock qlock;
    private WaitQueue waitingProducers;
    private WaitQueue waitingConsumers;
1139
1140 /**
1141 * Saves this queue to a stream (that is, serializes it).
1142 */
1143 private void writeObject(java.io.ObjectOutputStream s)
1144 throws java.io.IOException {
1145 boolean fair = transferer instanceof TransferQueue;
1146 if (fair) {
1147 qlock = new ReentrantLock(true);
1148 waitingProducers = new FifoWaitQueue();
1149 waitingConsumers = new FifoWaitQueue();
1150 }
1151 else {
1152 qlock = new ReentrantLock();
1153 waitingProducers = new LifoWaitQueue();
1154 waitingConsumers = new LifoWaitQueue();
1155 }
1156 s.defaultWriteObject();
1157 }
1158
1159 /**
1160 * Reconstitutes this queue from a stream (that is, deserializes it).
1161 */
1162 private void readObject(final java.io.ObjectInputStream s)
1163 throws java.io.IOException, ClassNotFoundException {
1164 s.defaultReadObject();
1165 if (waitingProducers instanceof FifoWaitQueue)
1166 transferer = new TransferQueue<E>();
1167 else
1168 transferer = new TransferStack<E>();
1169 }
1170
1171 // Unsafe mechanics
1172 static long objectFieldOffset(sun.misc.Unsafe UNSAFE,
1173 String field, Class<?> klazz) {
1174 try {
1175 return UNSAFE.objectFieldOffset(klazz.getDeclaredField(field));
1176 } catch (NoSuchFieldException e) {
1177 // Convert Exception to corresponding Error
1178 NoSuchFieldError error = new NoSuchFieldError(field);
1179 error.initCause(e);
1180 throw error;
1181 }
1182 }
1183
1184 }