root/jsr166/jsr166/src/jsr166y/LinkedTransferQueue.java
Revision: 1.49
Committed: Thu Oct 22 15:58:44 2009 UTC by jsr166
Branch: MAIN
Changes since 1.48: +3 -3 lines
Log Message:
typos

File Contents

1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/licenses/publicdomain
5 */
6
7 package jsr166y;
8
9 import java.util.concurrent.*;
10
11 import java.util.AbstractQueue;
12 import java.util.Collection;
13 import java.util.ConcurrentModificationException;
14 import java.util.Iterator;
15 import java.util.NoSuchElementException;
16 import java.util.Queue;
17 import java.util.concurrent.locks.LockSupport;
18 /**
19 * An unbounded {@link TransferQueue} based on linked nodes.
20 * This queue orders elements FIFO (first-in-first-out) with respect
21 * to any given producer. The <em>head</em> of the queue is that
22 * element that has been on the queue the longest time for some
23 * producer. The <em>tail</em> of the queue is that element that has
24 * been on the queue the shortest time for some producer.
25 *
26 * <p>Beware that, unlike in most collections, the {@code size}
27 * method is <em>NOT</em> a constant-time operation. Because of the
28 * asynchronous nature of these queues, determining the current number
29 * of elements requires a traversal of the elements.
30 *
31 * <p>This class and its iterator implement all of the
32 * <em>optional</em> methods of the {@link Collection} and {@link
33 * Iterator} interfaces.
34 *
35 * <p>Memory consistency effects: As with other concurrent
36 * collections, actions in a thread prior to placing an object into a
37 * {@code LinkedTransferQueue}
38 * <a href="package-summary.html#MemoryVisibility"><i>happen-before</i></a>
39 * actions subsequent to the access or removal of that element from
40 * the {@code LinkedTransferQueue} in another thread.
41 *
42 * <p>This class is a member of the
43 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
44 * Java Collections Framework</a>.
45 *
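* <p>For example, here is a minimal usage sketch (illustrative only;
* {@code process} stands for application code and is not part of this
* class) of a producer handing an item directly to a consumer:
*
* <pre> {@code
* final LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
* new Thread(new Runnable() {
*   public void run() {
*     try {
*       String s = q.take(); // blocks until an item is available
*       process(s);          // placeholder for application code
*     } catch (InterruptedException ignore) {}
*   }
* }).start();
* q.transfer("hello");       // blocks until the consumer receives it
* }</pre>
*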
46 * @since 1.7
47 * @author Doug Lea
48 * @param <E> the type of elements held in this collection
49 */
50 public class LinkedTransferQueue<E> extends AbstractQueue<E>
51 implements TransferQueue<E>, java.io.Serializable {
52 private static final long serialVersionUID = -3223113410248163686L;
53
54 /*
55 * *** Overview of Dual Queues with Slack ***
56 *
57 * Dual Queues, introduced by Scherer and Scott
58 * (http://www.cs.rice.edu/~wns1/papers/2004-DISC-DDS.pdf) are
59 * (linked) queues in which nodes may represent either data or
60 * requests. When a thread tries to enqueue a data node, but
61 * encounters a request node, it instead "matches" and removes it;
62 * and vice versa for enqueuing requests. Blocking Dual Queues
63 * arrange that threads enqueuing unmatched requests block until
64 * other threads provide the match. Dual Synchronous Queues (see
65 * Scherer, Lea, & Scott
66 * http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf)
67 * additionally arrange that threads enqueuing unmatched data also
68 * block. Dual Transfer Queues support all of these modes, as
69 * dictated by callers.
70 *
71 * A FIFO dual queue may be implemented using a variation of the
72 * Michael & Scott (M&S) lock-free queue algorithm
73 * (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf).
74 * It maintains two pointer fields, "head", pointing to a
75 * (matched) node that in turn points to the first actual
76 * (unmatched) queue node (or null if empty); and "tail" that
77 * points to the last node on the queue (or again null if
78 * empty). For example, here is a possible queue with four data
79 * elements:
80 *
81 * head tail
82 * | |
83 * v v
84 * M -> U -> U -> U -> U
85 *
86 * The M&S queue algorithm is known to be prone to scalability and
87 * overhead limitations when maintaining (via CAS) these head and
88 * tail pointers. This has led to the development of
89 * contention-reducing variants such as elimination arrays (see
90 * Moir et al http://portal.acm.org/citation.cfm?id=1074013) and
91 * optimistic back pointers (see Ladan-Mozes & Shavit
92 * http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf).
93 * However, the nature of dual queues enables a simpler tactic for
94 * improving M&S-style implementations when dual-ness is needed.
95 *
96 * In a dual queue, each node must atomically maintain its match
97 * status. While there are other possible variants, we implement
98 * this here as: for a data-mode node, matching entails CASing an
99 * "item" field from a non-null data value to null upon match, and
100 * vice-versa for request nodes, CASing from null to a data
101 * value. (Note that the linearization properties of this style of
102 * queue are easy to verify -- elements are made available by
103 * linking, and unavailable by matching.) Compared to plain M&S
104 * queues, this property of dual queues requires one additional
105 * successful atomic operation per enq/deq pair. But it also
106 * enables lower cost variants of queue maintenance mechanics. (A
107 * variation of this idea applies even for non-dual queues that
108 * support deletion of interior elements, such as
109 * j.u.c.ConcurrentLinkedQueue.)
110 *
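* As an illustrative sketch (using the casItem method of the Node
* class defined below), a consumer matches a data node p by:
*
*   Object x = p.item;
*   if (x != null && x != p && p.casItem(x, null))
*       return x; // matched; p now appears matched to all threads
*
* and a producer offering value v matches a request node by the
* reverse CAS, p.casItem(null, v).
*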
111 * Once a node is matched, its match status can never again
112 * change. We may thus arrange that the linked list of them
113 * contain a prefix of zero or more matched nodes, followed by a
114 * suffix of zero or more unmatched nodes. (Note that we allow
115 * both the prefix and suffix to be zero length, which in turn
116 * means that we do not use a dummy header.) If we were not
117 * concerned with either time or space efficiency, we could
118 * correctly perform enqueue and dequeue operations by traversing
119 * from a pointer to the initial node; CASing the item of the
120 * first unmatched node on match and CASing the next field of the
121 * trailing node on appends. (Plus some special-casing when
122 * initially empty). While this would be a terrible idea in
123 * itself, it does have the benefit of not requiring ANY atomic
124 * updates on head/tail fields.
125 *
126 * We introduce here an approach that lies between the extremes of
127 * never versus always updating queue (head and tail) pointers.
128 * This offers a tradeoff between sometimes requiring extra
129 * traversal steps to locate the first and/or last unmatched
130 * nodes, versus the reduced overhead and contention of fewer
131 * updates to queue pointers. For example, a possible snapshot of
132 * a queue is:
133 *
134 * head tail
135 * | |
136 * v v
137 * M -> M -> U -> U -> U -> U
138 *
139 * The best value for this "slack" (the targeted maximum distance
140 * between the value of "head" and the first unmatched node, and
141 * similarly for "tail") is an empirical matter. We have found
142 * that using very small constants in the range of 1-3 works best
143 * over a range of platforms. Larger values introduce increasing
144 * costs of cache misses and risks of long traversal chains, while
145 * smaller values increase CAS contention and overhead.
146 *
147 * Dual queues with slack differ from plain M&S dual queues by
148 * virtue of only sometimes updating head or tail pointers when
149 * matching, appending, or even traversing nodes, in order to
150 * maintain a targeted slack. The idea of "sometimes" may be
151 * operationalized in several ways. The simplest is to use a
152 * per-operation counter incremented on each traversal step, and
153 * to try (via CAS) to update the associated queue pointer
154 * whenever the count exceeds a threshold. Another, that requires
155 * more overhead, is to use random number generators to update
156 * with a given probability per traversal step.
157 *
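* As a sketch, the counter-based variant amounts to (SLACK and
* steps are illustrative names; the implementation below instead
* hard-wires a slack of two without explicit counters):
*
*   if (++steps > SLACK && head == h)
*       casHead(h, p); // try once; failures are tolerated
*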
158 * In any strategy along these lines, because CASes updating
159 * fields may fail, the actual slack may exceed targeted
160 * slack. However, they may be retried at any time to maintain
161 * targets. Even when using very small slack values, this
162 * approach works well for dual queues because it allows all
163 * operations up to the point of matching or appending an item
164 * (hence potentially releasing another thread) to be read-only,
165 * thus not introducing any further contention. As described
166 * below, we implement this by performing slack maintenance
167 * retries only after these points.
168 *
169 * As an accompaniment to such techniques, traversal overhead can
170 * be further reduced without increasing contention of head
171 * pointer updates. During traversals, threads may sometimes
172 * shortcut the "next" link path from the current "head" node to
173 * be closer to the currently known first unmatched node. Again,
174 * this may be triggered using thresholds or randomization.
175 *
176 * These ideas must be further extended to avoid unbounded amounts
177 * of costly-to-reclaim garbage caused by the sequential "next"
178 * links of nodes starting at old forgotten head nodes: As first
179 * described in detail by Boehm
180 * (http://portal.acm.org/citation.cfm?doid=503272.503282) if a GC
181 * delays noticing that any arbitrarily old node has become
182 * garbage, all newer dead nodes will also be unreclaimed.
183 * (Similar issues arise in non-GC environments.) To cope with
184 * this in our implementation, upon CASing to advance the head
185 * pointer, we set the "next" link of the previous head to point
186 * only to itself, thus limiting the length of connected dead lists.
187 * (We also take similar care to wipe out possibly garbage
188 * retaining values held in other Node fields.) However, doing so
189 * adds some further complexity to traversal: If any "next"
190 * pointer links to itself, it indicates that the current thread
191 * has lagged behind a head-update, and so the traversal must
192 * continue from the "head". Traversals trying to find the
193 * current tail starting from "tail" may also encounter
194 * self-links, in which case they also continue at "head".
195 *
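* In code, traversals therefore advance using the idiom that
* appears throughout this file:
*
*   Node n = p.next;
*   p = (n != p) ? n : head; // restart at head if p is off-list
*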
196 * It is tempting in a slack-based scheme to not even use CAS for
197 * updates (similarly to Ladan-Mozes & Shavit). However, this
198 * cannot be done for head updates under the above link-forgetting
199 * mechanics because an update may leave head at a detached node.
200 * And while direct writes are possible for tail updates, they
201 * increase the risk of long retraversals, and hence long garbage
202 * chains, which can be much more costly than is worthwhile
203 * considering that the cost difference of performing a CAS vs
204 * write is smaller when they are not triggered on each operation
205 * (especially considering that writes and CASes equally require
206 * additional GC bookkeeping ("write barriers") that are sometimes
207 * more costly than the writes themselves because of contention).
208 *
209 * Removal of interior nodes (due to timed out or interrupted
210 * waits, or calls to remove or Iterator.remove) uses a scheme
211 * roughly similar to that in Scherer, Lea, and Scott's
212 * SynchronousQueue. Given a predecessor, we can unsplice any node
213 * except the (actual) tail of the queue. To avoid build-up of
214 * cancelled trailing nodes, upon a request to remove a trailing
215 * node, it is placed in field "cleanMe" to be unspliced upon the
216 * next call to unsplice any other node. Situations needing such
217 * mechanics are not common but do occur in practice; for example
218 * when an unbounded series of short timed calls to poll
219 * repeatedly time out but never otherwise fall off the list
220 * because of an untimed call to take at the front of the
221 * queue. (Note that maintaining field cleanMe does not otherwise
222 * much impact garbage retention even if never cleared by some
223 * other call because the held node will eventually either
224 * directly or indirectly lead to a self-link once off the list.)
225 *
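* As a simplified sketch of this constraint (see unsplice below
* for the full logic), given the predecessor pred of a cancelled
* node s:
*
*   if (s.next != null)          // s is not the trailing node
*       pred.casNext(s, s.next); // unlink it now
*   else
*       cleanMe = pred;          // defer until s is no longer last
*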
226 * *** Overview of implementation ***
227 *
228 * We use a threshold-based approach to updates, with a target
229 * slack of two. The slack value is hard-wired: a path greater
230 * than one is naturally implemented by checking equality of
231 * traversal pointers except when the list has only one element,
232 * in which case we keep target slack at one. Avoiding tracking
233 * explicit counts across method calls slightly simplifies an
234 * already-messy implementation. Using randomization would
235 * probably work better if there were a low-quality dirt-cheap
236 * per-thread one available, but even ThreadLocalRandom is too
237 * heavy for these purposes.
238 *
239 * With such a small target slack value, it is rarely worthwhile
240 * to augment this with path short-circuiting; i.e., unsplicing
241 * nodes between head and the first unmatched node, or similarly
242 * for tail, rather than advancing head or tail proper. However,
243 * it is used (in awaitMatch) immediately before a waiting thread
244 * starts to block, as a final bit of helping at a point when
245 * contention with others is extremely unlikely (since if other
246 * threads that could release it are operating, then the current
247 * thread wouldn't be blocking).
248 *
249 * We allow both the head and tail fields to be null before any
250 * nodes are enqueued; initializing upon first append. This
251 * simplifies some other logic, as well as providing more
252 * efficient explicit control paths instead of letting JVMs insert
253 * implicit NullPointerExceptions when they are null. While not
254 * currently fully implemented, we also leave open the possibility
255 * of re-nulling these fields when empty (which is complicated to
256 * arrange, for little benefit.)
257 *
258 * All enqueue/dequeue operations are handled by the single method
259 * "xfer" with parameters indicating whether to act as some form
260 * of offer, put, poll, take, or transfer (each possibly with
261 * timeout). The relative complexity of using one monolithic
262 * method is outweighed by the code bulk and maintenance problems
263 * of using nine separate methods.
264 *
265 * Operation consists of up to three phases. The first is
266 * implemented within method xfer, the second in tryAppend, and
267 * the third in method awaitMatch.
268 *
269 * 1. Try to match an existing node
270 *
271 * Starting at head, skip already-matched nodes until finding
272 * an unmatched node of opposite mode, if one exists, in which
273 * case we match it and return, also updating head if necessary
274 * to one past the matched node (or the node itself if the
275 * list has no other unmatched nodes). If the CAS misses, then
276 * a loop retries advancing head by two steps until either
277 * success or the slack is at most two. By requiring that each
278 * attempt advances head by two (if applicable), we ensure that
279 * the slack does not grow without bound. Traversals also check
280 * if the initial head is now off-list, in which case they
281 * start at the new head.
282 *
283 * If no candidates are found and the call was untimed
284 * poll/offer (argument "how" is NOW), return.
285 *
286 * 2. Try to append a new node (method tryAppend)
287 *
288 * Starting at current tail pointer, try to append a new node
289 * to the list (or if head was null, establish the first
290 * node). Nodes can be appended only if their predecessors are
291 * either already matched or are of the same mode. If we detect
292 * otherwise, then a new node with opposite mode must have been
293 * appended during traversal, so we must restart at phase 1. The
294 * traversal and update steps are otherwise similar to phase 1:
295 * Retrying upon CAS misses and checking for staleness. In
296 * particular, if a self-link is encountered, then we can
297 * safely jump to a node on the list by continuing the
298 * traversal at current head.
299 *
300 * On successful append, if the call was ASYNC, return.
301 *
302 * 3. Await match or cancellation (method awaitMatch)
303 *
304 * Wait for another thread to match the node, instead cancelling
305 * if the current thread was interrupted or the wait timed out. On
306 * multiprocessors, we use front-of-queue spinning: If a node
307 * appears to be the first unmatched node in the queue, it
308 * spins a bit before blocking. In either case, before blocking
309 * it tries to unsplice any nodes between the current "head"
310 * and the first unmatched node.
311 *
312 * Front-of-queue spinning vastly improves performance of
313 * heavily contended queues. And so long as it is relatively
314 * brief and "quiet", spinning does not much impact performance
315 * of less-contended queues. During spins threads check their
316 * interrupt status and generate a thread-local random number
317 * to decide to occasionally perform a Thread.yield. While
318 * yield has underdefined specs, we assume that it might help,
319 * and will not hurt in limiting impact of spinning on busy
320 * systems. We also use much smaller (1/4) spins for nodes
321 * that are not known to be front but whose predecessors have
322 * not blocked -- these "chained" spins avoid artifacts of
323 * front-of-queue rules which otherwise lead to alternating
324 * nodes spinning vs blocking. Further, front threads that
325 * represent phase changes (from data to request node or vice
326 * versa) compared to their predecessors receive additional
327 * spins, reflecting the longer code path lengths necessary to
328 * release them under contention.
329 */
330
331 /** True if on multiprocessor */
332 private static final boolean MP =
333 Runtime.getRuntime().availableProcessors() > 1;
334
335 /**
336 * The number of times to spin (with on average one randomly
337 * interspersed call to Thread.yield) on a multiprocessor before
338 * blocking when a node is apparently the first waiter in the
339 * queue. See above for explanation. Must be a power of two. The
340 * value is empirically derived -- it works pretty well across a
341 * variety of processors, numbers of CPUs, and OSes.
342 */
343 private static final int FRONT_SPINS = 1 << 7;
344
345 /**
346 * The number of times to spin before blocking when a node is
347 * preceded by another node that is apparently spinning.
348 */
349 private static final int CHAINED_SPINS = FRONT_SPINS >>> 2;
350
351 /**
352 * Queue nodes. Uses Object, not E, for items to allow forgetting
353 * them after use. Relies heavily on Unsafe mechanics to minimize
354 * unnecessary ordering constraints: Writes that intrinsically
355 * precede or follow CASes use simple relaxed forms. Other
356 * cleanups use releasing/lazy writes.
357 */
358 static final class Node {
359 final boolean isData; // false if this is a request node
360 volatile Object item; // initially non-null if isData; CASed to match
361 volatile Node next;
362 volatile Thread waiter; // null until waiting
363
364 // CAS methods for fields
365 final boolean casNext(Node cmp, Node val) {
366 return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
367 }
368
369 final boolean casItem(Object cmp, Object val) {
370 return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
371 }
372
373 /**
374 * Creates a new node. Uses relaxed write because item can only
375 * be seen if followed by CAS.
376 */
377 Node(Object item, boolean isData) {
378 UNSAFE.putObject(this, itemOffset, item); // relaxed write
379 this.isData = isData;
380 }
381
382 /**
383 * Links node to itself to avoid garbage retention. Called
384 * only after CASing head field, so uses relaxed write.
385 */
386 final void forgetNext() {
387 UNSAFE.putObject(this, nextOffset, this);
388 }
389
390 /**
391 * Sets item to self (using a releasing/lazy write) and waiter
392 * to null, to avoid garbage retention after extracting or
393 * cancelling.
394 */
395 final void forgetContents() {
396 UNSAFE.putOrderedObject(this, itemOffset, this);
397 UNSAFE.putOrderedObject(this, waiterOffset, null);
398 }
399
400 /**
401 * Returns true if this node has been matched, including the
402 * case of artificial matches due to cancellation.
403 */
404 final boolean isMatched() {
405 Object x = item;
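            // matched if item is self-linked (forgotten after a match
            // or cancellation), or if it disagrees with the mode: a
            // data node with a null item, or a request node holding one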
406 return x == this || (x != null) != isData;
407 }
408
409 /**
410 * Returns true if a node with the given mode cannot be
411 * appended to this node because this node is unmatched and
412 * has opposite data mode.
413 */
414 final boolean cannotPrecede(boolean haveData) {
415 boolean d = isData;
416 Object x;
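            // refuse only if modes differ and this node is still
            // unmatched: its item is neither self-linked nor
            // inconsistent with its mode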
417 return d != haveData && (x = item) != this && (x != null) == d;
418 }
419
420 /**
421 * Tries to artificially match a data node -- used by remove.
422 */
423 final boolean tryMatchData() {
424 Object x = item;
425 if (x != null && x != this && casItem(x, null)) {
426 LockSupport.unpark(waiter);
427 return true;
428 }
429 return false;
430 }
431
432 // Unsafe mechanics
433 private static final sun.misc.Unsafe UNSAFE = getUnsafe();
434 private static final long nextOffset =
435 objectFieldOffset(UNSAFE, "next", Node.class);
436 private static final long itemOffset =
437 objectFieldOffset(UNSAFE, "item", Node.class);
438 private static final long waiterOffset =
439 objectFieldOffset(UNSAFE, "waiter", Node.class);
440
441 private static final long serialVersionUID = -3375979862319811754L;
442 }
443
444 /** head of the queue; null until first enqueue */
445 private transient volatile Node head;
446
447 /** predecessor of dangling unspliceable node */
448 private transient volatile Node cleanMe; // decl here to reduce contention
449
450 /** tail of the queue; null until first append */
451 private transient volatile Node tail;
452
453 // CAS methods for fields
454 private boolean casTail(Node cmp, Node val) {
455 return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val);
456 }
457
458 private boolean casHead(Node cmp, Node val) {
459 return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val);
460 }
461
462 private boolean casCleanMe(Node cmp, Node val) {
463 return UNSAFE.compareAndSwapObject(this, cleanMeOffset, cmp, val);
464 }
465
466 /*
467 * Possible values for "how" argument in xfer method. Beware that
468 * the order of assigned numerical values matters.
469 */
470 private static final int NOW = 0; // for untimed poll, tryTransfer
471 private static final int ASYNC = 1; // for offer, put, add
472 private static final int SYNC = 2; // for transfer, take
473 private static final int TIMEOUT = 3; // for timed poll, tryTransfer
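    // The ordering matters: xfer tests "how >= ASYNC" to decide
    // whether a node may be appended, and "how >= SYNC" to decide
    // whether to wait for a match.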
474
475 /**
476 * Implements all queuing methods. See above for explanation.
477 *
478 * @param e the item or null for take
479 * @param haveData true if this is a put, else a take
480 * @param how NOW, ASYNC, SYNC, or TIMEOUT
481 * @param nanos timeout in nanosecs, used only if mode is TIMEOUT
482 * @return an item if matched, else e
483 * @throws NullPointerException if haveData mode but e is null
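*
* The nine public queuing methods map onto these modes: NOW for
* untimed poll and tryTransfer; ASYNC for offer, put, and add (the
* timed offer also uses ASYNC, since the queue is unbounded); SYNC
* for take and transfer; TIMEOUT for timed poll and timed
* tryTransfer.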
484 */
485 private Object xfer(Object e, boolean haveData, int how, long nanos) {
486 if (haveData && (e == null))
487 throw new NullPointerException();
488 Node s = null; // the node to append, if needed
489
490 retry: for (;;) { // restart on append race
491
492 for (Node h = head, p = h; p != null;) { // find & match first node
493 boolean isData = p.isData;
494 Object item = p.item;
495 if (item != p && (item != null) == isData) { // unmatched
496 if (isData == haveData) // can't match
497 break;
498 if (p.casItem(item, e)) { // match
499 Thread w = p.waiter;
500 while (p != h) { // update head
501 Node n = p.next; // by 2 unless singleton
502 if (n != null)
503 p = n;
504 if (head == h && casHead(h, p)) {
505 h.forgetNext();
506 break;
507 } // advance and retry
508 if ((h = head) == null ||
509 (p = h.next) == null || !p.isMatched())
510 break; // unless slack < 2
511 }
512 LockSupport.unpark(w);
513 return item;
514 }
515 }
516 Node n = p.next;
517 p = (p != n) ? n : (h = head); // Use head if p offlist
518 }
519
520 if (how >= ASYNC) { // No matches available
521 if (s == null)
522 s = new Node(e, haveData);
523 Node pred = tryAppend(s, haveData);
524 if (pred == null)
525 continue retry; // lost race vs opposite mode
526 if (how >= SYNC)
527 return awaitMatch(pred, s, e, how, nanos);
528 }
529 return e; // not waiting
530 }
531 }
532
533 /**
534 * Tries to append node s as tail.
535 *
536 * @param s the node to append
537 * @param haveData true if appending in data mode
538 * @return null on failure due to losing race with append in
539 * different mode, else s's predecessor, or s itself if no
540 * predecessor
541 */
542 private Node tryAppend(Node s, boolean haveData) {
543 for (Node t = tail, p = t;;) { // move p to last node and append
544 Node n, u; // temps for reads of next & tail
545 if (p == null && (p = head) == null) {
546 if (casHead(null, s))
547 return s; // initialize
548 }
549 else if (p.cannotPrecede(haveData))
550 return null; // lost race vs opposite mode
551 else if ((n = p.next) != null) // not last; keep traversing
552 p = p != t && t != (u = tail) ? (t = u) : // stale tail
553 (p != n) ? n : null; // restart if off list
554 else if (!p.casNext(null, s))
555 p = p.next; // re-read on CAS failure
556 else {
557 if (p != t) { // update if slack now >= 2
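                    // swing tail to s; on CAS failure re-read tail and
                    // retry only while the slack past the new tail is
                    // still at least two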
558 while ((tail != t || !casTail(t, s)) &&
559 (t = tail) != null &&
560 (s = t.next) != null && // advance and retry
561 (s = s.next) != null && s != t);
562 }
563 return p;
564 }
565 }
566 }
567
568 /**
569 * Spins/yields/blocks until node s is matched or caller gives up.
570 *
571 * @param pred the predecessor of s, or s itself if it has no predecessor, or null if unknown
572 * @param s the waiting node
573 * @param e the comparison value for checking match
574 * @param how either SYNC or TIMEOUT
575 * @param nanos timeout value
576 * @return matched item, or e if unmatched on interrupt or timeout
577 */
578 private Object awaitMatch(Node pred, Node s, Object e,
579 int how, long nanos) {
580 long lastTime = (how == TIMEOUT) ? System.nanoTime() : 0L;
581 Thread w = Thread.currentThread();
582 int spins = -1; // initialized after first item and cancel checks
583 ThreadLocalRandom randomYields = null; // bound if needed
584
585 for (;;) {
586 Object item = s.item;
587 if (item != e) { // matched
588 s.forgetContents(); // avoid garbage
589 return item;
590 }
591 if ((w.isInterrupted() || (how == TIMEOUT && nanos <= 0)) &&
592 s.casItem(e, s)) { // cancel
593 unsplice(pred, s);
594 return e;
595 }
596
597 if (spins < 0) { // establish spins at/near front
598 if ((spins = spinsFor(pred, s.isData)) > 0)
599 randomYields = ThreadLocalRandom.current();
600 }
601 else if (spins > 0) { // spin, occasionally yield
602 if (randomYields.nextInt(FRONT_SPINS) == 0)
603 Thread.yield();
604 --spins;
605 }
606 else if (s.waiter == null) {
607 shortenHeadPath(); // reduce slack before blocking
608 s.waiter = w; // request unpark
609 }
610 else if (how == TIMEOUT) {
611 long now = System.nanoTime();
612 if ((nanos -= now - lastTime) > 0)
613 LockSupport.parkNanos(this, nanos);
614 lastTime = now;
615 }
616 else {
617 LockSupport.park(this);
618 spins = -1; // spin if front upon wakeup
619 }
620 }
621 }
622
623 /**
624 * Returns spin/yield value for a node with given predecessor and
625 * data mode. See above for explanation.
626 */
627 private static int spinsFor(Node pred, boolean haveData) {
628 if (MP && pred != null) {
629 boolean predData = pred.isData;
630 if (predData != haveData) // front and phase change
631 return FRONT_SPINS + (FRONT_SPINS >>> 1);
632 if (predData != (pred.item != null)) // probably at front
633 return FRONT_SPINS;
634 if (pred.waiter == null) // pred apparently spinning
635 return CHAINED_SPINS;
636 }
637 return 0;
638 }
639
640 /**
641 * Tries (once) to unsplice nodes between head and first unmatched
642 * or trailing node; failing on contention.
643 */
644 private void shortenHeadPath() {
645 Node h, hn, p, q;
646 if ((p = h = head) != null && h.isMatched() &&
647 (q = hn = h.next) != null) {
648 Node n;
649 while ((n = q.next) != q) {
650 if (n == null || !q.isMatched()) {
651 if (hn != q && h.next == hn)
652 h.casNext(hn, q);
653 break;
654 }
655 p = q;
656 q = n;
657 }
658 }
659 }
660
661 /* -------------- Traversal methods -------------- */
662
663 /**
664 * Returns the first unmatched node of the given mode, or null if
665 * none. Used by methods isEmpty, hasWaitingConsumer.
666 */
667 private Node firstOfMode(boolean data) {
668 for (Node p = head; p != null; ) {
669 if (!p.isMatched())
670 return (p.isData == data) ? p : null;
671 Node n = p.next;
672 p = (n != p) ? n : head;
673 }
674 return null;
675 }
676
677 /**
678 * Returns the item in the first unmatched node with isData; or
679 * null if none. Used by peek.
680 */
681 private Object firstDataItem() {
682 for (Node p = head; p != null; ) {
683 boolean isData = p.isData;
684 Object item = p.item;
685 if (item != p && (item != null) == isData)
686 return isData ? item : null;
687 Node n = p.next;
688 p = (n != p) ? n : head;
689 }
690 return null;
691 }
692
693 /**
694 * Traverses and counts unmatched nodes of the given mode.
695 * Used by methods size and getWaitingConsumerCount.
696 */
697 private int countOfMode(boolean data) {
698 int count = 0;
699 for (Node p = head; p != null; ) {
700 if (!p.isMatched()) {
701 if (p.isData != data)
702 return 0;
703 if (++count == Integer.MAX_VALUE) // saturated
704 break;
705 }
706 Node n = p.next;
707 if (n != p)
708 p = n;
709 else {
710 count = 0;
711 p = head;
712 }
713 }
714 return count;
715 }
716
717 final class Itr implements Iterator<E> {
718 private Node nextNode; // next node to return item for
719 private Object nextItem; // the corresponding item
720 private Node lastRet; // last returned node, to support remove
721
722 /**
723 * Moves to next node after prev, or first node if prev null.
724 */
725 private void advance(Node prev) {
726 lastRet = prev;
727 Node p;
728 if (prev == null || (p = prev.next) == prev)
729 p = head;
730 while (p != null) {
731 Object item = p.item;
732 if (p.isData) {
733 if (item != null && item != p) {
734 nextItem = item;
735 nextNode = p;
736 return;
737 }
738 }
739 else if (item == null)
740 break;
741 Node n = p.next;
742 p = (n != p) ? n : head;
743 }
744 nextNode = null;
745 }
746
747 Itr() {
748 advance(null);
749 }
750
751 public final boolean hasNext() {
752 return nextNode != null;
753 }
754
755 public final E next() {
756 Node p = nextNode;
757 if (p == null) throw new NoSuchElementException();
758 Object e = nextItem;
759 advance(p);
760 return (E) e;
761 }
762
763 public final void remove() {
764 Node p = lastRet;
765 if (p == null) throw new IllegalStateException();
766 lastRet = null;
767 findAndRemoveNode(p);
768 }
769 }
770
771 /* -------------- Removal methods -------------- */
772
773 /**
774 * Unsplices (now or later) the given deleted/cancelled node with
775 * the given predecessor.
776 *
777 * @param pred predecessor of node to be unspliced
778 * @param s the node to be unspliced
779 */
780 private void unsplice(Node pred, Node s) {
781 s.forgetContents(); // clear unneeded fields
782 /*
783 * At any given time, exactly one node on list cannot be
784 * unlinked -- the last inserted node. To accommodate this, if
785 * we cannot unlink s, we save its predecessor as "cleanMe",
786 * processing the previously saved version first. Because only
787 * one node in the list can have a null next, at least one of
788 * node s or the node previously saved can always be
789 * processed, so this always terminates.
790 */
791 if (pred != null && pred != s) {
792 while (pred.next == s) {
793 Node oldpred = (cleanMe == null) ? null : reclean();
794 Node n = s.next;
795 if (n != null) {
796 if (n != s)
797 pred.casNext(s, n);
798 break;
799 }
800 if (oldpred == pred || // Already saved
801 (oldpred == null && casCleanMe(null, pred)))
802 break; // Postpone cleaning
803 }
804 }
805 }
806
807 /**
808 * Tries to unsplice the deleted/cancelled node held in cleanMe
809 * that was previously uncleanable because it was at tail.
810 *
811 * @return current cleanMe node (or null)
812 */
813 private Node reclean() {
814 /*
815 * cleanMe is, or at one time was, predecessor of a cancelled
816 * node s that was the tail so could not be unspliced. If it
817 * is no longer the tail, try to unsplice if necessary and
818 * make cleanMe slot available. This differs from similar
819 * code in unsplice() because we must check that pred still
820 * points to a matched node that can be unspliced -- if not,
821 * we can (must) clear cleanMe without unsplicing. This can
822 * loop only due to contention.
823 */
824 Node pred;
825 while ((pred = cleanMe) != null) {
826 Node s = pred.next;
827 Node n;
828 if (s == null || s == pred || !s.isMatched())
829 casCleanMe(pred, null); // already gone
830 else if ((n = s.next) != null) {
831 if (n != s)
832 pred.casNext(s, n);
833 casCleanMe(pred, null);
834 }
835 else
836 break;
837 }
838 return pred;
839 }
840
841 /**
842 * Main implementation of Iterator.remove(). Find
843 * and unsplice the given node.
844 */
845 final void findAndRemoveNode(Node s) {
846 if (s.tryMatchData()) {
847 Node pred = null;
848 Node p = head;
849 while (p != null) {
850 if (p == s) {
851 unsplice(pred, p);
852 break;
853 }
854 if (!p.isData && !p.isMatched())
855 break;
856 pred = p;
857 if ((p = p.next) == pred) { // stale
858 pred = null;
859 p = head;
860 }
861 }
862 }
863 }
864
865 /**
866 * Main implementation of remove(Object)
867 */
868 private boolean findAndRemove(Object e) {
869 if (e != null) {
870 Node pred = null;
871 Node p = head;
872 while (p != null) {
873 Object item = p.item;
874 if (p.isData) {
875 if (item != null && item != p && e.equals(item) &&
876 p.tryMatchData()) {
877 unsplice(pred, p);
878 return true;
879 }
880 }
881 else if (item == null)
882 break;
883 pred = p;
884 if ((p = p.next) == pred) {
885 pred = null;
886 p = head;
887 }
888 }
889 }
890 return false;
891 }
892
893
894 /**
895 * Creates an initially empty {@code LinkedTransferQueue}.
896 */
897 public LinkedTransferQueue() {
898 }
899
900 /**
901 * Creates a {@code LinkedTransferQueue}
902 * initially containing the elements of the given collection,
903 * added in traversal order of the collection's iterator.
904 *
905 * @param c the collection of elements to initially contain
906 * @throws NullPointerException if the specified collection or any
907 * of its elements are null
908 */
909 public LinkedTransferQueue(Collection<? extends E> c) {
910 this();
911 addAll(c);
912 }
913
914 /**
915 * Inserts the specified element at the tail of this queue.
916 * As the queue is unbounded, this method will never block.
917 *
918 * @throws NullPointerException if the specified element is null
919 */
920 public void put(E e) {
921 xfer(e, true, ASYNC, 0);
922 }
923
924 /**
925 * Inserts the specified element at the tail of this queue.
926 * As the queue is unbounded, this method will never block or
927 * return {@code false}.
928 *
929 * @return {@code true} (as specified by
930 * {@link BlockingQueue#offer(Object,long,TimeUnit) BlockingQueue.offer})
931 * @throws NullPointerException if the specified element is null
932 */
933 public boolean offer(E e, long timeout, TimeUnit unit) {
934 xfer(e, true, ASYNC, 0);
935 return true;
936 }
937
938 /**
939 * Inserts the specified element at the tail of this queue.
940 * As the queue is unbounded, this method will never return {@code false}.
941 *
942 * @return {@code true} (as specified by
943 * {@link BlockingQueue#offer(Object) BlockingQueue.offer})
944 * @throws NullPointerException if the specified element is null
945 */
946 public boolean offer(E e) {
947 xfer(e, true, ASYNC, 0);
948 return true;
949 }
950
951 /**
952 * Inserts the specified element at the tail of this queue.
953 * As the queue is unbounded, this method will never throw
954 * {@link IllegalStateException} or return {@code false}.
955 *
956 * @return {@code true} (as specified by {@link Collection#add})
957 * @throws NullPointerException if the specified element is null
958 */
959 public boolean add(E e) {
960 xfer(e, true, ASYNC, 0);
961 return true;
962 }
963
964 /**
965 * Transfers the element to a waiting consumer immediately, if possible.
966 *
967 * <p>More precisely, transfers the specified element immediately
968 * if there exists a consumer already waiting to receive it (in
969 * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
970 * otherwise returning {@code false} without enqueuing the element.
971 *
972 * @throws NullPointerException if the specified element is null
973 */
974 public boolean tryTransfer(E e) {
975 return xfer(e, true, NOW, 0) == null;
976 }
977
978 /**
979 * Transfers the element to a consumer, waiting if necessary to do so.
980 *
981 * <p>More precisely, transfers the specified element immediately
982 * if there exists a consumer already waiting to receive it (in
983 * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
984 * else inserts the specified element at the tail of this queue
985 * and waits until the element is received by a consumer.
986 *
987 * @throws NullPointerException if the specified element is null
988 */
989 public void transfer(E e) throws InterruptedException {
990 if (xfer(e, true, SYNC, 0) != null) {
991 Thread.interrupted(); // failure possible only due to interrupt
992 throw new InterruptedException();
993 }
994 }
995
996 /**
997 * Transfers the element to a consumer if it is possible to do so
998 * before the timeout elapses.
999 *
1000 * <p>More precisely, transfers the specified element immediately
1001 * if there exists a consumer already waiting to receive it (in
1002 * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
1003 * else inserts the specified element at the tail of this queue
1004 * and waits until the element is received by a consumer,
1005 * returning {@code false} if the specified wait time elapses
1006 * before the element can be transferred.
1007 *
1008 * @throws NullPointerException if the specified element is null
1009 */
1010 public boolean tryTransfer(E e, long timeout, TimeUnit unit)
1011 throws InterruptedException {
1012 if (xfer(e, true, TIMEOUT, unit.toNanos(timeout)) == null)
1013 return true;
1014 if (!Thread.interrupted())
1015 return false;
1016 throw new InterruptedException();
1017 }
1018
1019 public E take() throws InterruptedException {
1020 Object e = xfer(null, false, SYNC, 0);
1021 if (e != null)
1022 return (E)e;
1023 Thread.interrupted();
1024 throw new InterruptedException();
1025 }
1026
1027 public E poll(long timeout, TimeUnit unit) throws InterruptedException {
1028 Object e = xfer(null, false, TIMEOUT, unit.toNanos(timeout));
1029 if (e != null || !Thread.interrupted())
1030 return (E)e;
1031 throw new InterruptedException();
1032 }
1033
1034 public E poll() {
1035 return (E)xfer(null, false, NOW, 0);
1036 }
1037
1038 /**
1039 * @throws NullPointerException {@inheritDoc}
1040 * @throws IllegalArgumentException {@inheritDoc}
1041 */
1042 public int drainTo(Collection<? super E> c) {
1043 if (c == null)
1044 throw new NullPointerException();
1045 if (c == this)
1046 throw new IllegalArgumentException();
1047 int n = 0;
1048 E e;
1049 while ( (e = poll()) != null) {
1050 c.add(e);
1051 ++n;
1052 }
1053 return n;
1054 }
1055
1056 /**
1057 * @throws NullPointerException {@inheritDoc}
1058 * @throws IllegalArgumentException {@inheritDoc}
1059 */
1060 public int drainTo(Collection<? super E> c, int maxElements) {
1061 if (c == null)
1062 throw new NullPointerException();
1063 if (c == this)
1064 throw new IllegalArgumentException();
1065 int n = 0;
1066 E e;
1067 while (n < maxElements && (e = poll()) != null) {
1068 c.add(e);
1069 ++n;
1070 }
1071 return n;
1072 }
1073
1074 /**
1075 * Returns an iterator over the elements in this queue in proper
1076 * sequence, from head to tail.
1077 *
1078 * <p>The returned iterator is a "weakly consistent" iterator that
1079 * will never throw
1080 * {@link ConcurrentModificationException ConcurrentModificationException},
1081 * and guarantees to traverse elements as they existed upon
1082 * construction of the iterator, and may (but is not guaranteed
1083 * to) reflect any modifications subsequent to construction.
1084 *
1085 * @return an iterator over the elements in this queue in proper sequence
1086 */
1087 public Iterator<E> iterator() {
1088 return new Itr();
1089 }
1090
1091 public E peek() {
1092 return (E) firstDataItem();
1093 }
1094
1095 /**
1096 * Returns {@code true} if this queue contains no elements.
1097 *
1098 * @return {@code true} if this queue contains no elements
1099 */
1100 public boolean isEmpty() {
1101 return firstOfMode(true) == null;
1102 }
1103
1104 public boolean hasWaitingConsumer() {
1105 return firstOfMode(false) != null;
1106 }
1107
1108 /**
1109 * Returns the number of elements in this queue. If this queue
1110 * contains more than {@code Integer.MAX_VALUE} elements, returns
1111 * {@code Integer.MAX_VALUE}.
1112 *
1113 * <p>Beware that, unlike in most collections, this method is
1114 * <em>NOT</em> a constant-time operation. Because of the
1115 * asynchronous nature of these queues, determining the current
1116 * number of elements requires an O(n) traversal.
1117 *
1118 * @return the number of elements in this queue
1119 */
1120 public int size() {
1121 return countOfMode(true);
1122 }
1123
1124 public int getWaitingConsumerCount() {
1125 return countOfMode(false);
1126 }
1127
1128 /**
1129 * Removes a single instance of the specified element from this queue,
1130 * if it is present. More formally, removes an element {@code e} such
1131 * that {@code o.equals(e)}, if this queue contains one or more such
1132 * elements.
1133 * Returns {@code true} if this queue contained the specified element
1134 * (or equivalently, if this queue changed as a result of the call).
1135 *
1136 * @param o element to be removed from this queue, if present
1137 * @return {@code true} if this queue changed as a result of the call
1138 */
1139 public boolean remove(Object o) {
1140 return findAndRemove(o);
1141 }
1142
1143 /**
1144 * Always returns {@code Integer.MAX_VALUE} because a
1145 * {@code LinkedTransferQueue} is not capacity constrained.
1146 *
1147 * @return {@code Integer.MAX_VALUE} (as specified by
1148 * {@link BlockingQueue#remainingCapacity()})
1149 */
1150 public int remainingCapacity() {
1151 return Integer.MAX_VALUE;
1152 }
1153
1154 /**
1155 * Saves the state to a stream (that is, serializes it).
1156 *
1157 * @serialData All of the elements (each an {@code E}) in
1158 * the proper order, followed by a null
1159 * @param s the stream
1160 */
1161 private void writeObject(java.io.ObjectOutputStream s)
1162 throws java.io.IOException {
1163 s.defaultWriteObject();
1164 for (E e : this)
1165 s.writeObject(e);
1166 // Use trailing null as sentinel
1167 s.writeObject(null);
1168 }
1169
1170 /**
1171 * Reconstitutes the Queue instance from a stream (that is,
1172 * deserializes it).
1173 *
1174 * @param s the stream
1175 */
1176 private void readObject(java.io.ObjectInputStream s)
1177 throws java.io.IOException, ClassNotFoundException {
1178 s.defaultReadObject();
1179 for (;;) {
1180 @SuppressWarnings("unchecked") E item = (E) s.readObject();
1181 if (item == null)
1182 break;
1183 else
1184 offer(item);
1185 }
1186 }
1187
1188
1189 // Unsafe mechanics
1190
1191 private static final sun.misc.Unsafe UNSAFE = getUnsafe();
1192 private static final long headOffset =
1193 objectFieldOffset(UNSAFE, "head", LinkedTransferQueue.class);
1194 private static final long tailOffset =
1195 objectFieldOffset(UNSAFE, "tail", LinkedTransferQueue.class);
1196 private static final long cleanMeOffset =
1197 objectFieldOffset(UNSAFE, "cleanMe", LinkedTransferQueue.class);
1198
1199 static long objectFieldOffset(sun.misc.Unsafe UNSAFE,
1200 String field, Class<?> klazz) {
1201 try {
1202 return UNSAFE.objectFieldOffset(klazz.getDeclaredField(field));
1203 } catch (NoSuchFieldException e) {
1204 // Convert Exception to corresponding Error
1205 NoSuchFieldError error = new NoSuchFieldError(field);
1206 error.initCause(e);
1207 throw error;
1208 }
1209 }
1210
1211 private static sun.misc.Unsafe getUnsafe() {
1212 try {
1213 return sun.misc.Unsafe.getUnsafe();
1214 } catch (SecurityException se) {
1215 try {
1216 return java.security.AccessController.doPrivileged
1217 (new java.security
1218 .PrivilegedExceptionAction<sun.misc.Unsafe>() {
1219 public sun.misc.Unsafe run() throws Exception {
1220 java.lang.reflect.Field f = sun.misc
1221 .Unsafe.class.getDeclaredField("theUnsafe");
1222 f.setAccessible(true);
1223 return (sun.misc.Unsafe) f.get(null);
1224 }});
1225 } catch (java.security.PrivilegedActionException e) {
1226 throw new RuntimeException("Could not initialize intrinsics",
1227 e.getCause());
1228 }
1229 }
1230 }
1231
1232 }