root/jsr166/jsr166/src/jdk7/java/util/concurrent/SynchronousQueue.java
Revision: 1.9
Committed: Mon Mar 23 18:56:40 2015 UTC by jsr166
Branch: MAIN
CVS Tags: HEAD
Changes since 1.8: +5 -0 lines
Log Message:
JDK-8074773: Reduce the risk of rare disastrous classloading in first call to LockSupport.park

/*
 * Written by Doug Lea, Bill Scherer, and Michael Scott with
 * assistance from members of JCP JSR-166 Expert Group and released to
 * the public domain, as explained at
 * http://creativecommons.org/publicdomain/zero/1.0/
 */

package java.util.concurrent;

import java.util.concurrent.locks.LockSupport;
import java.util.concurrent.locks.ReentrantLock;
import java.util.*;

/**
 * A {@linkplain BlockingQueue blocking queue} in which each insert
 * operation must wait for a corresponding remove operation by another
 * thread, and vice versa. A synchronous queue does not have any
 * internal capacity, not even a capacity of one. You cannot
 * {@code peek} at a synchronous queue because an element is only
 * present when you try to remove it; you cannot insert an element
 * (using any method) unless another thread is trying to remove it;
 * you cannot iterate as there is nothing to iterate. The
 * <em>head</em> of the queue is the element that the first queued
 * inserting thread is trying to add to the queue; if there is no such
 * queued thread then no element is available for removal and
 * {@code poll()} will return {@code null}. For purposes of other
 * {@code Collection} methods (for example {@code contains}), a
 * {@code SynchronousQueue} acts as an empty collection. This queue
 * does not permit {@code null} elements.
 *
 * <p>Synchronous queues are similar to rendezvous channels used in
 * CSP and Ada. They are well suited for handoff designs, in which an
 * object running in one thread must synchronize with an object
 * running in another thread in order to hand it some information,
 * event, or task.
 *
 * <p>This class supports an optional fairness policy for ordering
 * waiting producer and consumer threads. By default, this ordering
 * is not guaranteed. However, a queue constructed with fairness set
 * to {@code true} grants threads access in FIFO order.
 *
 * <p>This class and its iterator implement all of the
 * <em>optional</em> methods of the {@link Collection} and {@link
 * Iterator} interfaces.
 *
 * <p>This class is a member of the
 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
 * Java Collections Framework</a>.
 *
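 * <p>A minimal usage sketch (an illustration, not part of the original
 * documentation): one thread hands a value directly to another, each
 * side blocking until its counterpart arrives.
 *
 * <pre> {@code
 * final SynchronousQueue<String> q = new SynchronousQueue<String>();
 * new Thread(new Runnable() {
 *   public void run() {
 *     try {
 *       System.out.println(q.take()); // blocks until the put below
 *     } catch (InterruptedException ignore) {
 *       Thread.currentThread().interrupt();
 *     }
 *   }
 * }).start();
 * q.put("hello"); // blocks until the consumer thread takes
 * }</pre>
 *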
 * @since 1.5
 * @author Doug Lea and Bill Scherer and Michael Scott
 * @param <E> the type of elements held in this collection
 */
public class SynchronousQueue<E> extends AbstractQueue<E>
    implements BlockingQueue<E>, java.io.Serializable {
    private static final long serialVersionUID = -3223113410248163686L;

    /*
     * This class implements extensions of the dual stack and dual
     * queue algorithms described in "Nonblocking Concurrent Objects
     * with Condition Synchronization", by W. N. Scherer III and
     * M. L. Scott. 18th Annual Conf. on Distributed Computing,
     * Oct. 2004 (see also
     * http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/duals.html).
     * The (Lifo) stack is used for non-fair mode, and the (Fifo)
     * queue for fair mode. The performance of the two is generally
     * similar. Fifo usually supports higher throughput under
     * contention but Lifo maintains higher thread locality in common
     * applications.
     *
     * A dual queue (and similarly stack) is one that at any given
     * time either holds "data" -- items provided by put operations,
     * or "requests" -- slots representing take operations, or is
     * empty. A call to "fulfill" (i.e., a call requesting an item
     * from a queue holding data or vice versa) dequeues a
     * complementary node. The most interesting feature of these
     * queues is that any operation can figure out which mode the
     * queue is in, and act accordingly without needing locks.
     *
     * Both the queue and stack extend abstract class Transferer
     * defining the single method transfer that does a put or a
     * take. These are unified into a single method because in dual
     * data structures, the put and take operations are symmetrical,
     * so nearly all code can be combined. The resulting transfer
     * methods are on the long side, but are easier to follow than
     * they would be if broken up into nearly-duplicated parts.
     *
     * The queue and stack data structures share many conceptual
     * similarities but very few concrete details. For simplicity,
     * they are kept distinct so that they can later evolve
     * separately.
     *
     * The algorithms here differ from the versions in the above paper
     * in extending them for use in synchronous queues, as well as
     * dealing with cancellation. The main differences include:
     *
     *  1. The original algorithms used bit-marked pointers, but
     *     the ones here use mode bits in nodes, leading to a number
     *     of further adaptations.
     *  2. SynchronousQueues must block threads waiting to become
     *     fulfilled.
     *  3. Support for cancellation via timeout and interrupts,
     *     including cleaning out cancelled nodes/threads
     *     from lists to avoid garbage retention and memory depletion.
     *
     * Blocking is mainly accomplished using LockSupport park/unpark,
     * except that nodes that appear to be the next ones to become
     * fulfilled first spin a bit (on multiprocessors only). On very
     * busy synchronous queues, spinning can dramatically improve
     * throughput. And on less busy ones, the amount of spinning is
     * small enough not to be noticeable.
     *
     * Cleaning is done in different ways in queues vs stacks. For
     * queues, we can almost always remove a node immediately in O(1)
     * time (modulo retries for consistency checks) when it is
     * cancelled. But if it may be pinned as the current tail, it must
     * wait until some subsequent cancellation. For stacks, we need a
     * potentially O(n) traversal to be sure that we can remove the
     * node, but this can run concurrently with other threads
     * accessing the stack.
     *
     * While garbage collection takes care of most node reclamation
     * issues that otherwise complicate nonblocking algorithms, care
     * is taken to "forget" references to data, other nodes, and
     * threads that might be held on to long-term by blocked
     * threads. In cases where setting to null would otherwise
     * conflict with main algorithms, this is done by changing a
     * node's link to now point to the node itself. This doesn't arise
     * much for Stack nodes (because blocked threads do not hang on to
     * old head pointers), but references in Queue nodes must be
     * aggressively forgotten to avoid reachability of everything any
     * node has ever referred to since arrival.
     */
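
    /*
     * Illustrative trace (a sketch, not part of the algorithm):
     * because a dual structure holds either all data or all requests,
     * the first arrival always enqueues and waits, and a complementary
     * arrival fulfills it:
     *
     *   take() arrives first -> pushes/enqueues a REQUEST node; taker blocks
     *   put(x) arrives next  -> fulfills that REQUEST with x; taker unparked
     *
     *   put(x) arrives first -> pushes/enqueues a DATA node; putter blocks
     *   take() arrives next  -> fulfills that DATA node; putter unparked
     */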

    /**
     * Shared internal API for dual stacks and queues.
     */
    abstract static class Transferer<E> {
        /**
         * Performs a put or take.
         *
         * @param e if non-null, the item to be handed to a consumer;
         *          if null, requests that transfer return an item
         *          offered by a producer.
         * @param timed if this operation should timeout
         * @param nanos the timeout, in nanoseconds
         * @return if non-null, the item provided or received; if null,
         *         the operation failed due to timeout or interrupt --
         *         the caller can distinguish which of these occurred
         *         by checking Thread.interrupted.
         */
        abstract E transfer(E e, boolean timed, long nanos);
    }
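
    /*
     * Every public method below funnels into the single transfer
     * method; the mapping, taken from the method bodies later in this
     * file, is:
     *
     *   put(e)         -> transfer(e, false, 0)        block until taken
     *   offer(e)       -> transfer(e, true, 0)         succeed only if a taker is waiting
     *   offer(e, t, u) -> transfer(e, true, nanos)     block up to the timeout
     *   take()         -> transfer(null, false, 0)     block until offered
     *   poll()         -> transfer(null, true, 0)      succeed only if a putter is waiting
     *   poll(t, u)     -> transfer(null, true, nanos)  block up to the timeout
     */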

    /** The number of CPUs, for spin control */
    static final int NCPUS = Runtime.getRuntime().availableProcessors();

    /**
     * The number of times to spin before blocking in timed waits.
     * The value is empirically derived -- it works well across a
     * variety of processors and OSes. Empirically, the best value
     * seems not to vary with number of CPUs (beyond 2) so is just
     * a constant.
     */
    static final int maxTimedSpins = (NCPUS < 2) ? 0 : 32;

    /**
     * The number of times to spin before blocking in untimed waits.
     * This is greater than the timed value because untimed waits spin
     * faster since they don't need to check times on each spin.
     */
    static final int maxUntimedSpins = maxTimedSpins * 16;

    /**
     * The number of nanoseconds for which it is faster to spin
     * rather than to use timed park. A rough estimate suffices.
     */
    static final long spinForTimeoutThreshold = 1000L;
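
    /*
     * Sketch of how the three constants above combine in the
     * awaitFulfill methods below (an illustration only; "likelyNext"
     * stands for the head-of-structure / shouldSpin checks used there):
     *
     *   int spins = likelyNext ? (timed ? maxTimedSpins : maxUntimedSpins) : 0;
     *   for (;;) {                      // until matched or cancelled
     *       if (spins > 0)
     *           --spins;                // busy-wait briefly before blocking
     *       else if (!timed)
     *           LockSupport.park(this);
     *       else if (nanos > spinForTimeoutThreshold)
     *           LockSupport.parkNanos(this, nanos);
     *       // else: the remaining wait is shorter than a park/unpark
     *       // round trip, so just keep iterating
     *   }
     */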

    /** Dual stack */
    static final class TransferStack<E> extends Transferer<E> {
        /*
         * This extends Scherer-Scott dual stack algorithm, differing,
         * among other ways, by using "covering" nodes rather than
         * bit-marked pointers: Fulfilling operations push on marker
         * nodes (with FULFILLING bit set in mode) to reserve a spot
         * to match a waiting node.
         */

        /* Modes for SNodes, ORed together in node fields */
        /** Node represents an unfulfilled consumer */
        static final int REQUEST    = 0;
        /** Node represents an unfulfilled producer */
        static final int DATA       = 1;
        /** Node is fulfilling another unfulfilled DATA or REQUEST */
        static final int FULFILLING = 2;

        /** Returns true if m has fulfilling bit set. */
        static boolean isFulfilling(int m) { return (m & FULFILLING) != 0; }
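
        // For example: a fulfilling producer pushes a node with mode
        // (FULFILLING|DATA) == 3 and a fulfilling consumer one with
        // (FULFILLING|REQUEST) == 2, so isFulfilling is true for both,
        // while plain REQUEST (0) and DATA (1) nodes report false.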

        /** Node class for TransferStacks. */
        static final class SNode {
            volatile SNode next;        // next node in stack
            volatile SNode match;       // the node matched to this
            volatile Thread waiter;     // to control park/unpark
            Object item;                // data; or null for REQUESTs
            int mode;
            // Note: item and mode fields don't need to be volatile
            // since they are always written before, and read after,
            // other volatile/atomic operations.

            SNode(Object item) {
                this.item = item;
            }

            boolean casNext(SNode cmp, SNode val) {
                return cmp == next &&
                    UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
            }

            /**
             * Tries to match node s to this node and, if successful,
             * wakes up the waiting thread. Fulfillers call tryMatch
             * to identify their waiters. Waiters block until they
             * have been matched.
             *
             * @param s the node to match
             * @return true if successfully matched to s
             */
            boolean tryMatch(SNode s) {
                if (match == null &&
                    UNSAFE.compareAndSwapObject(this, matchOffset, null, s)) {
                    Thread w = waiter;
                    if (w != null) {    // waiters need at most one unpark
                        waiter = null;
                        LockSupport.unpark(w);
                    }
                    return true;
                }
                return match == s;
            }

            /**
             * Tries to cancel a wait by matching node to itself.
             */
            void tryCancel() {
                UNSAFE.compareAndSwapObject(this, matchOffset, null, this);
            }

            boolean isCancelled() {
                return match == this;
            }

            // Unsafe mechanics
            private static final sun.misc.Unsafe UNSAFE;
            private static final long matchOffset;
            private static final long nextOffset;

            static {
                try {
                    UNSAFE = sun.misc.Unsafe.getUnsafe();
                    Class<?> k = SNode.class;
                    matchOffset = UNSAFE.objectFieldOffset
                        (k.getDeclaredField("match"));
                    nextOffset = UNSAFE.objectFieldOffset
                        (k.getDeclaredField("next"));
                } catch (Exception e) {
                    throw new Error(e);
                }
            }
        }

        /** The head (top) of the stack */
        volatile SNode head;

        boolean casHead(SNode h, SNode nh) {
            return h == head &&
                UNSAFE.compareAndSwapObject(this, headOffset, h, nh);
        }

        /**
         * Creates or resets fields of a node. Called only from transfer
         * where the node to push on stack is lazily created and
         * reused when possible to help reduce intervals between reads
         * and CASes of head and to avoid surges of garbage when CASes
         * to push nodes fail due to contention.
         */
        static SNode snode(SNode s, Object e, SNode next, int mode) {
            if (s == null) s = new SNode(e);
            s.mode = mode;
            s.next = next;
            return s;
        }

        /**
         * Puts or takes an item.
         */
        @SuppressWarnings("unchecked")
        E transfer(E e, boolean timed, long nanos) {
            /*
             * Basic algorithm is to loop trying one of three actions:
             *
             * 1. If apparently empty or already containing nodes of same
             *    mode, try to push node on stack and wait for a match,
             *    returning it, or null if cancelled.
             *
             * 2. If apparently containing node of complementary mode,
             *    try to push a fulfilling node on to stack, match
             *    with corresponding waiting node, pop both from
             *    stack, and return matched item. The matching or
             *    unlinking might not actually be necessary because of
             *    other threads performing action 3:
             *
             * 3. If top of stack already holds another fulfilling node,
             *    help it out by doing its match and/or pop
             *    operations, and then continue. The code for helping
             *    is essentially the same as for fulfilling, except
             *    that it doesn't return the item.
             */
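
            /*
             * Illustrative trace of a typical handoff (a sketch of the
             * three cases above, not extra logic):
             *   Thread A: take() -> stack empty, pushes a REQUEST node r
             *             and parks in awaitFulfill (case 1)
             *   Thread B: put(x) -> sees complementary r at head, pushes a
             *             FULFILLING|DATA node f above it (case 2), then
             *             r.tryMatch(f) succeeds, unparking A and popping
             *             both f and r
             *   Thread A: wakes, finds r matched, and returns x
             */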

            SNode s = null; // constructed/reused as needed
            int mode = (e == null) ? REQUEST : DATA;

            for (;;) {
                SNode h = head;
                if (h == null || h.mode == mode) {  // empty or same-mode
                    if (timed && nanos <= 0) {      // can't wait
                        if (h != null && h.isCancelled())
                            casHead(h, h.next);     // pop cancelled node
                        else
                            return null;
                    } else if (casHead(h, s = snode(s, e, h, mode))) {
                        SNode m = awaitFulfill(s, timed, nanos);
                        if (m == s) {               // wait was cancelled
                            clean(s);
                            return null;
                        }
                        if ((h = head) != null && h.next == s)
                            casHead(h, s.next);     // help s's fulfiller
                        return (E) ((mode == REQUEST) ? m.item : s.item);
                    }
                } else if (!isFulfilling(h.mode)) { // try to fulfill
                    if (h.isCancelled())            // already cancelled
                        casHead(h, h.next);         // pop and retry
                    else if (casHead(h, s = snode(s, e, h, FULFILLING|mode))) {
                        for (;;) { // loop until matched or waiters disappear
                            SNode m = s.next;       // m is s's match
                            if (m == null) {        // all waiters are gone
                                casHead(s, null);   // pop fulfill node
                                s = null;           // use new node next time
                                break;              // restart main loop
                            }
                            SNode mn = m.next;
                            if (m.tryMatch(s)) {
                                casHead(s, mn);     // pop both s and m
                                return (E) ((mode == REQUEST) ? m.item : s.item);
                            } else                  // lost match
                                s.casNext(m, mn);   // help unlink
                        }
                    }
                } else {                            // help a fulfiller
                    SNode m = h.next;               // m is h's match
                    if (m == null)                  // waiter is gone
                        casHead(h, null);           // pop fulfilling node
                    else {
                        SNode mn = m.next;
                        if (m.tryMatch(h))          // help match
                            casHead(h, mn);         // pop both h and m
                        else                        // lost match
                            h.casNext(m, mn);       // help unlink
                    }
                }
            }
        }

        /**
         * Spins/blocks until node s is matched by a fulfill operation.
         *
         * @param s the waiting node
         * @param timed true if timed wait
         * @param nanos timeout value
         * @return matched node, or s if cancelled
         */
        SNode awaitFulfill(SNode s, boolean timed, long nanos) {
            /*
             * When a node/thread is about to block, it sets its waiter
             * field and then rechecks state at least one more time
             * before actually parking, thus covering race vs
             * fulfiller noticing that waiter is non-null so should be
             * woken.
             *
             * When invoked by nodes that appear at the point of call
             * to be at the head of the stack, calls to park are
             * preceded by spins to avoid blocking when producers and
             * consumers are arriving very close in time. This can
             * happen enough to bother only on multiprocessors.
             *
             * The order of checks for returning out of the main loop
             * reflects the fact that interrupts have precedence over
             * normal returns, which have precedence over
             * timeouts. (So, on timeout, one last check for match is
             * done before giving up.) Except that calls from untimed
             * SynchronousQueue.{poll/offer} don't check interrupts
             * and don't wait at all, so are trapped in the transfer
             * method rather than calling awaitFulfill.
             */
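
            /*
             * Sketch of the set-waiter-then-recheck handshake described
             * above (an illustration of the interleaving, not extra code):
             *
             *   waiting thread                 fulfilling thread
             *   --------------                 -----------------
             *   s.waiter = w                   CAS s.match from null to f
             *   recheck s.match                w = s.waiter
             *   park only if still unmatched   if (w != null) unpark(w)
             *
             * The recheck guarantees a thread never parks after its
             * match has already been published.
             */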
            final long deadline = timed ? System.nanoTime() + nanos : 0L;
            Thread w = Thread.currentThread();
            int spins = (shouldSpin(s) ?
                         (timed ? maxTimedSpins : maxUntimedSpins) : 0);
            for (;;) {
                if (w.isInterrupted())
                    s.tryCancel();
                SNode m = s.match;
                if (m != null)
                    return m;
                if (timed) {
                    nanos = deadline - System.nanoTime();
                    if (nanos <= 0L) {
                        s.tryCancel();
                        continue;
                    }
                }
                if (spins > 0)
                    spins = shouldSpin(s) ? (spins-1) : 0;
                else if (s.waiter == null)
                    s.waiter = w; // establish waiter so can park next iter
                else if (!timed)
                    LockSupport.park(this);
                else if (nanos > spinForTimeoutThreshold)
                    LockSupport.parkNanos(this, nanos);
            }
        }

        /**
         * Returns true if node s is at head or there is an active
         * fulfiller.
         */
        boolean shouldSpin(SNode s) {
            SNode h = head;
            return (h == s || h == null || isFulfilling(h.mode));
        }

        /**
         * Unlinks s from the stack.
         */
        void clean(SNode s) {
            s.item = null;   // forget item
            s.waiter = null; // forget thread

            /*
             * At worst we may need to traverse entire stack to unlink
             * s. If there are multiple concurrent calls to clean, we
             * might not see s if another thread has already removed
             * it. But we can stop when we see any node known to
             * follow s. We use s.next unless it too is cancelled, in
             * which case we try the node one past. We don't check any
             * further because we don't want to doubly traverse just to
             * find sentinel.
             */
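
            /*
             * Worked example (an illustration): given the stack
             *   head -> a -> s -> b -> c, with s and b cancelled,
             * past becomes c (s.next is b, which is itself cancelled),
             * the first loop below pops any cancelled nodes sitting at
             * head, and the second unsplices s and b without walking
             * past c.
             */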

            SNode past = s.next;
            if (past != null && past.isCancelled())
                past = past.next;

            // Absorb cancelled nodes at head
            SNode p;
            while ((p = head) != null && p != past && p.isCancelled())
                casHead(p, p.next);

            // Unsplice embedded nodes
            while (p != null && p != past) {
                SNode n = p.next;
                if (n != null && n.isCancelled())
                    p.casNext(n, n.next);
                else
                    p = n;
            }
        }

        // Unsafe mechanics
        private static final sun.misc.Unsafe UNSAFE;
        private static final long headOffset;
        static {
            try {
                UNSAFE = sun.misc.Unsafe.getUnsafe();
                Class<?> k = TransferStack.class;
                headOffset = UNSAFE.objectFieldOffset
                    (k.getDeclaredField("head"));
            } catch (Exception e) {
                throw new Error(e);
            }
        }
    }

    /** Dual Queue */
    static final class TransferQueue<E> extends Transferer<E> {
        /*
         * This extends Scherer-Scott dual queue algorithm, differing,
         * among other ways, by using modes within nodes rather than
         * marked pointers. The algorithm is a little simpler than
         * that for stacks because fulfillers do not need explicit
         * nodes, and matching is done by CAS'ing QNode.item field
         * from non-null to null (for put) or vice versa (for take).
         */
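
        /*
         * Illustration of the matching described above: the queue
         * fulfills via a CAS on the waiter's item field rather than a
         * pushed marker node:
         *
         *   fulfilling a waiting take (item == null):  CAS item null -> e
         *   fulfilling a waiting put  (item == x):     CAS item x -> null
         *
         * A waiter detects fulfillment in awaitFulfill when it reads an
         * item different from the one it enqueued with; cancellation is
         * the special case of CASing item to the node itself.
         */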

        /** Node class for TransferQueue. */
        static final class QNode {
            volatile QNode next;          // next node in queue
            volatile Object item;         // CAS'ed to or from null
            volatile Thread waiter;       // to control park/unpark
            final boolean isData;

            QNode(Object item, boolean isData) {
                this.item = item;
                this.isData = isData;
            }

            boolean casNext(QNode cmp, QNode val) {
                return next == cmp &&
                    UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
            }

            boolean casItem(Object cmp, Object val) {
                return item == cmp &&
                    UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
            }

            /**
             * Tries to cancel by CAS'ing ref to this as item.
             */
            void tryCancel(Object cmp) {
                UNSAFE.compareAndSwapObject(this, itemOffset, cmp, this);
            }

            boolean isCancelled() {
                return item == this;
            }

            /**
             * Returns true if this node is known to be off the queue
             * because its next pointer has been forgotten due to
             * an advanceHead operation.
             */
            boolean isOffList() {
                return next == this;
            }

            // Unsafe mechanics
            private static final sun.misc.Unsafe UNSAFE;
            private static final long itemOffset;
            private static final long nextOffset;

            static {
                try {
                    UNSAFE = sun.misc.Unsafe.getUnsafe();
                    Class<?> k = QNode.class;
                    itemOffset = UNSAFE.objectFieldOffset
                        (k.getDeclaredField("item"));
                    nextOffset = UNSAFE.objectFieldOffset
                        (k.getDeclaredField("next"));
                } catch (Exception e) {
                    throw new Error(e);
                }
            }
        }

        /** Head of queue */
        transient volatile QNode head;
        /** Tail of queue */
        transient volatile QNode tail;
        /**
         * Reference to a cancelled node that might not yet have been
         * unlinked from queue because it was the last inserted node
         * when it was cancelled.
         */
        transient volatile QNode cleanMe;

        TransferQueue() {
            QNode h = new QNode(null, false); // initialize to dummy node.
            head = h;
            tail = h;
        }

        /**
         * Tries to cas nh as new head; if successful, unlink
         * old head's next node to avoid garbage retention.
         */
        void advanceHead(QNode h, QNode nh) {
            if (h == head &&
                UNSAFE.compareAndSwapObject(this, headOffset, h, nh))
                h.next = h; // forget old next
        }

        /**
         * Tries to cas nt as new tail.
         */
        void advanceTail(QNode t, QNode nt) {
            if (tail == t)
                UNSAFE.compareAndSwapObject(this, tailOffset, t, nt);
        }

        /**
         * Tries to CAS cleanMe slot.
         */
        boolean casCleanMe(QNode cmp, QNode val) {
            return cleanMe == cmp &&
                UNSAFE.compareAndSwapObject(this, cleanMeOffset, cmp, val);
        }

        /**
         * Puts or takes an item.
         */
        @SuppressWarnings("unchecked")
        E transfer(E e, boolean timed, long nanos) {
            /* Basic algorithm is to loop trying to take either of
             * two actions:
             *
             * 1. If queue apparently empty or holding same-mode nodes,
             *    try to add node to queue of waiters, wait to be
             *    fulfilled (or cancelled) and return matching item.
             *
             * 2. If queue apparently contains waiting items, and this
             *    call is of complementary mode, try to fulfill by CAS'ing
             *    item field of waiting node and dequeuing it, and then
             *    returning matching item.
             *
             * In each case, along the way, check for and try to help
             * advance head and tail on behalf of other stalled/slow
             * threads.
             *
             * The loop starts off with a null check guarding against
             * seeing uninitialized head or tail values. This never
             * happens in the current SynchronousQueue, but could if
             * callers held a non-volatile/final ref to the
             * transferer. The check is here anyway because it places
             * null checks at the top of the loop, which is usually
             * faster than having them implicitly interspersed.
             */

            QNode s = null; // constructed/reused as needed
            boolean isData = (e != null);

            for (;;) {
                QNode t = tail;
                QNode h = head;
                if (t == null || h == null)         // saw uninitialized value
                    continue;                       // spin

                if (h == t || t.isData == isData) { // empty or same-mode
                    QNode tn = t.next;
                    if (t != tail)                  // inconsistent read
                        continue;
                    if (tn != null) {               // lagging tail
                        advanceTail(t, tn);
                        continue;
                    }
                    if (timed && nanos <= 0)        // can't wait
                        return null;
                    if (s == null)
                        s = new QNode(e, isData);
                    if (!t.casNext(null, s))        // failed to link in
                        continue;

                    advanceTail(t, s);              // swing tail and wait
                    Object x = awaitFulfill(s, e, timed, nanos);
                    if (x == s) {                   // wait was cancelled
                        clean(t, s);
                        return null;
                    }

                    if (!s.isOffList()) {           // not already unlinked
                        advanceHead(t, s);          // unlink if head
                        if (x != null)              // and forget fields
                            s.item = s;
                        s.waiter = null;
                    }
                    return (x != null) ? (E)x : e;

                } else {                            // complementary-mode
                    QNode m = h.next;               // node to fulfill
                    if (t != tail || m == null || h != head)
                        continue;                   // inconsistent read

                    Object x = m.item;
                    if (isData == (x != null) ||    // m already fulfilled
                        x == m ||                   // m cancelled
                        !m.casItem(x, e)) {         // lost CAS
                        advanceHead(h, m);          // dequeue and retry
                        continue;
                    }

                    advanceHead(h, m);              // successfully fulfilled
                    LockSupport.unpark(m.waiter);
                    return (x != null) ? (E)x : e;
                }
            }
        }

        /**
         * Spins/blocks until node s is fulfilled.
         *
         * @param s the waiting node
         * @param e the comparison value for checking match
         * @param timed true if timed wait
         * @param nanos timeout value
         * @return matched item, or s if cancelled
         */
        Object awaitFulfill(QNode s, E e, boolean timed, long nanos) {
            /* Same idea as TransferStack.awaitFulfill */
            final long deadline = timed ? System.nanoTime() + nanos : 0L;
            Thread w = Thread.currentThread();
            int spins = ((head.next == s) ?
                         (timed ? maxTimedSpins : maxUntimedSpins) : 0);
            for (;;) {
                if (w.isInterrupted())
                    s.tryCancel(e);
                Object x = s.item;
                if (x != e)
                    return x;
                if (timed) {
                    nanos = deadline - System.nanoTime();
                    if (nanos <= 0L) {
                        s.tryCancel(e);
                        continue;
                    }
                }
                if (spins > 0)
                    --spins;
                else if (s.waiter == null)
                    s.waiter = w;
                else if (!timed)
                    LockSupport.park(this);
                else if (nanos > spinForTimeoutThreshold)
                    LockSupport.parkNanos(this, nanos);
            }
        }

        /**
         * Gets rid of cancelled node s with original predecessor pred.
         */
        void clean(QNode pred, QNode s) {
            s.waiter = null; // forget thread
            /*
             * At any given time, exactly one node on list cannot be
             * deleted -- the last inserted node. To accommodate this,
             * if we cannot delete s, we save its predecessor as
             * "cleanMe", deleting the previously saved version
             * first. At least one of node s or the node previously
             * saved can always be deleted, so this always terminates.
             */
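
            /*
             * Example (an illustration): if s is the tail when it is
             * cancelled, it cannot be unspliced yet, so pred is parked
             * in cleanMe instead; a later call first unlinks the node
             * saved there (once it is no longer tail) and only then
             * stores a new predecessor.
             */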
            while (pred.next == s) { // Return early if already unlinked
                QNode h = head;
                QNode hn = h.next;   // Absorb cancelled first node as head
                if (hn != null && hn.isCancelled()) {
                    advanceHead(h, hn);
                    continue;
                }
                QNode t = tail;      // Ensure consistent read for tail
                if (t == h)
                    return;
                QNode tn = t.next;
                if (t != tail)
                    continue;
                if (tn != null) {
                    advanceTail(t, tn);
                    continue;
                }
                if (s != t) {        // If not tail, try to unsplice
                    QNode sn = s.next;
                    if (sn == s || pred.casNext(s, sn))
                        return;
                }
                QNode dp = cleanMe;
                if (dp != null) {    // Try unlinking previous cancelled node
                    QNode d = dp.next;
                    QNode dn;
                    if (d == null ||               // d is gone or
                        d == dp ||                 // d is off list or
                        !d.isCancelled() ||        // d not cancelled or
                        (d != t &&                 // d not tail and
                         (dn = d.next) != null &&  //   has successor
                         dn != d &&                //   that is on list
                         dp.casNext(d, dn)))       // d unspliced
                        casCleanMe(dp, null);
                    if (dp == pred)
                        return;      // s is already saved node
                } else if (casCleanMe(null, pred))
                    return;          // Postpone cleaning s
            }
        }

        private static final sun.misc.Unsafe UNSAFE;
        private static final long headOffset;
        private static final long tailOffset;
        private static final long cleanMeOffset;
        static {
            try {
                UNSAFE = sun.misc.Unsafe.getUnsafe();
                Class<?> k = TransferQueue.class;
                headOffset = UNSAFE.objectFieldOffset
                    (k.getDeclaredField("head"));
                tailOffset = UNSAFE.objectFieldOffset
                    (k.getDeclaredField("tail"));
                cleanMeOffset = UNSAFE.objectFieldOffset
                    (k.getDeclaredField("cleanMe"));
            } catch (Exception e) {
                throw new Error(e);
            }
        }
    }

    /**
     * The transferer. Set only in constructor, but cannot be declared
     * as final without further complicating serialization. Since
     * this is accessed only at most once per public method, there
     * isn't a noticeable performance penalty for using volatile
     * instead of final here.
     */
    private transient volatile Transferer<E> transferer;

    /**
     * Creates a {@code SynchronousQueue} with nonfair access policy.
     */
    public SynchronousQueue() {
        this(false);
    }

    /**
     * Creates a {@code SynchronousQueue} with the specified fairness policy.
     *
     * @param fair if true, waiting threads contend in FIFO order for
     *        access; otherwise the order is unspecified.
     */
    public SynchronousQueue(boolean fair) {
        transferer = fair ? new TransferQueue<E>() : new TransferStack<E>();
    }

    /**
     * Adds the specified element to this queue, waiting if necessary for
     * another thread to receive it.
     *
     * @throws InterruptedException {@inheritDoc}
     * @throws NullPointerException {@inheritDoc}
     */
    public void put(E e) throws InterruptedException {
        if (e == null) throw new NullPointerException();
        if (transferer.transfer(e, false, 0) == null) {
            Thread.interrupted();
            throw new InterruptedException();
        }
    }

    /**
     * Inserts the specified element into this queue, waiting if necessary
     * up to the specified wait time for another thread to receive it.
     *
     * @return {@code true} if successful, or {@code false} if the
     *         specified waiting time elapses before a consumer appears
     * @throws InterruptedException {@inheritDoc}
     * @throws NullPointerException {@inheritDoc}
     */
    public boolean offer(E e, long timeout, TimeUnit unit)
        throws InterruptedException {
        if (e == null) throw new NullPointerException();
        if (transferer.transfer(e, true, unit.toNanos(timeout)) != null)
            return true;
        if (!Thread.interrupted())
            return false;
        throw new InterruptedException();
    }

    /**
     * Inserts the specified element into this queue, if another thread is
     * waiting to receive it.
     *
     * @param e the element to add
     * @return {@code true} if the element was added to this queue, else
     *         {@code false}
     * @throws NullPointerException if the specified element is null
     */
    public boolean offer(E e) {
        if (e == null) throw new NullPointerException();
        return transferer.transfer(e, true, 0) != null;
    }

    /**
     * Retrieves and removes the head of this queue, waiting if necessary
     * for another thread to insert it.
     *
     * @return the head of this queue
     * @throws InterruptedException {@inheritDoc}
     */
    public E take() throws InterruptedException {
        E e = transferer.transfer(null, false, 0);
        if (e != null)
            return e;
        Thread.interrupted();
        throw new InterruptedException();
    }

    /**
     * Retrieves and removes the head of this queue, waiting
     * if necessary up to the specified wait time, for another thread
     * to insert it.
     *
     * @return the head of this queue, or {@code null} if the
     *         specified waiting time elapses before an element is present
     * @throws InterruptedException {@inheritDoc}
     */
    public E poll(long timeout, TimeUnit unit) throws InterruptedException {
        E e = transferer.transfer(null, true, unit.toNanos(timeout));
        if (e != null || !Thread.interrupted())
            return e;
        throw new InterruptedException();
    }

    /**
     * Retrieves and removes the head of this queue, if another thread
     * is currently making an element available.
     *
     * @return the head of this queue, or {@code null} if no
     *         element is available
     */
    public E poll() {
        return transferer.transfer(null, true, 0);
    }

    /**
     * Always returns {@code true}.
     * A {@code SynchronousQueue} has no internal capacity.
     *
     * @return {@code true}
     */
    public boolean isEmpty() {
        return true;
    }

    /**
     * Always returns zero.
     * A {@code SynchronousQueue} has no internal capacity.
     *
     * @return zero
     */
    public int size() {
        return 0;
    }

    /**
     * Always returns zero.
     * A {@code SynchronousQueue} has no internal capacity.
     *
     * @return zero
     */
    public int remainingCapacity() {
        return 0;
    }

    /**
     * Does nothing.
     * A {@code SynchronousQueue} has no internal capacity.
     */
    public void clear() {
    }

    /**
     * Always returns {@code false}.
     * A {@code SynchronousQueue} has no internal capacity.
     *
     * @param o the element
     * @return {@code false}
     */
    public boolean contains(Object o) {
        return false;
    }

    /**
     * Always returns {@code false}.
     * A {@code SynchronousQueue} has no internal capacity.
     *
     * @param o the element to remove
     * @return {@code false}
     */
    public boolean remove(Object o) {
        return false;
    }

    /**
     * Returns {@code false} unless the given collection is empty.
     * A {@code SynchronousQueue} has no internal capacity.
     *
     * @param c the collection
     * @return {@code false} unless given collection is empty
     */
    public boolean containsAll(Collection<?> c) {
        return c.isEmpty();
    }

    /**
     * Always returns {@code false}.
     * A {@code SynchronousQueue} has no internal capacity.
     *
     * @param c the collection
     * @return {@code false}
     */
    public boolean removeAll(Collection<?> c) {
        return false;
    }

    /**
     * Always returns {@code false}.
     * A {@code SynchronousQueue} has no internal capacity.
     *
     * @param c the collection
     * @return {@code false}
     */
    public boolean retainAll(Collection<?> c) {
        return false;
    }

    /**
     * Always returns {@code null}.
     * A {@code SynchronousQueue} does not return elements
     * unless actively waited on.
     *
     * @return {@code null}
     */
    public E peek() {
        return null;
    }

    /**
     * Returns an empty iterator in which {@code hasNext} always returns
     * {@code false}.
     *
     * @return an empty iterator
     */
    @SuppressWarnings("unchecked")
    public Iterator<E> iterator() {
        return (Iterator<E>) EmptyIterator.EMPTY_ITERATOR;
    }

    // Replicated from a previous version of Collections
    private static class EmptyIterator<E> implements Iterator<E> {
        static final EmptyIterator<Object> EMPTY_ITERATOR
            = new EmptyIterator<Object>();

        public boolean hasNext() { return false; }
        public E next() { throw new NoSuchElementException(); }
        public void remove() { throw new IllegalStateException(); }
    }

    /**
     * Returns a zero-length array.
     * @return a zero-length array
     */
    public Object[] toArray() {
        return new Object[0];
    }

    /**
     * Sets the zeroth element of the specified array to {@code null}
     * (if the array has non-zero length) and returns it.
     *
     * @param a the array
     * @return the specified array
     * @throws NullPointerException if the specified array is null
     */
    public <T> T[] toArray(T[] a) {
        if (a.length > 0)
            a[0] = null;
        return a;
    }

    /**
     * @throws UnsupportedOperationException {@inheritDoc}
     * @throws ClassCastException {@inheritDoc}
     * @throws NullPointerException {@inheritDoc}
     * @throws IllegalArgumentException {@inheritDoc}
     */
    public int drainTo(Collection<? super E> c) {
        if (c == null)
            throw new NullPointerException();
        if (c == this)
            throw new IllegalArgumentException();
        int n = 0;
        for (E e; (e = poll()) != null;) {
            c.add(e);
            ++n;
        }
        return n;
    }

    /**
     * @throws UnsupportedOperationException {@inheritDoc}
     * @throws ClassCastException {@inheritDoc}
     * @throws NullPointerException {@inheritDoc}
     * @throws IllegalArgumentException {@inheritDoc}
     */
    public int drainTo(Collection<? super E> c, int maxElements) {
        if (c == null)
            throw new NullPointerException();
        if (c == this)
            throw new IllegalArgumentException();
        int n = 0;
        for (E e; n < maxElements && (e = poll()) != null;) {
            c.add(e);
            ++n;
        }
        return n;
    }

    /*
     * To cope with the serialization strategy in the 1.5 version of
     * SynchronousQueue, we declare some unused classes and fields
     * that exist solely to enable serializability across versions.
     * These fields are never used, so are initialized only if this
     * object is ever serialized or deserialized.
     */

    @SuppressWarnings("serial")
    static class WaitQueue implements java.io.Serializable { }
    static class LifoWaitQueue extends WaitQueue {
        private static final long serialVersionUID = -3633113410248163686L;
    }
    static class FifoWaitQueue extends WaitQueue {
        private static final long serialVersionUID = -3623113410248163686L;
    }
    private ReentrantLock qlock;
    private WaitQueue waitingProducers;
    private WaitQueue waitingConsumers;

    /**
     * Saves this queue to a stream (that is, serializes it).
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        boolean fair = transferer instanceof TransferQueue;
        if (fair) {
            qlock = new ReentrantLock(true);
            waitingProducers = new FifoWaitQueue();
            waitingConsumers = new FifoWaitQueue();
        }
        else {
            qlock = new ReentrantLock();
            waitingProducers = new LifoWaitQueue();
            waitingConsumers = new LifoWaitQueue();
        }
        s.defaultWriteObject();
    }

    /**
     * Reconstitutes this queue from a stream (that is, deserializes it).
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        s.defaultReadObject();
        if (waitingProducers instanceof FifoWaitQueue)
            transferer = new TransferQueue<E>();
        else
            transferer = new TransferStack<E>();
    }

    // Unsafe mechanics
    static long objectFieldOffset(sun.misc.Unsafe UNSAFE,
                                  String field, Class<?> klazz) {
        try {
            return UNSAFE.objectFieldOffset(klazz.getDeclaredField(field));
        } catch (NoSuchFieldException e) {
            // Convert Exception to corresponding Error
            NoSuchFieldError error = new NoSuchFieldError(field);
            error.initCause(e);
            throw error;
        }
    }

    static {
        // Reduce the risk of rare disastrous classloading in first call to
        // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
        Class<?> ensureLoaded = LockSupport.class;
    }
}