root/jsr166/jsr166/src/jsr166y/LinkedTransferQueue.java
Revision: 1.52
Committed: Sat Oct 24 14:57:32 2009 UTC by dl
Branch: MAIN
Changes since 1.51: +7 -7 lines
Log Message:
Better version of last change

File Contents

1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/licenses/publicdomain
5 */
6
7 package jsr166y;
8
9 import java.util.concurrent.*;
10
11 import java.util.AbstractQueue;
12 import java.util.Collection;
13 import java.util.ConcurrentModificationException;
14 import java.util.Iterator;
15 import java.util.NoSuchElementException;
16 import java.util.Queue;
17 import java.util.concurrent.locks.LockSupport;
18 /**
19 * An unbounded {@link TransferQueue} based on linked nodes.
20 * This queue orders elements FIFO (first-in-first-out) with respect
21 * to any given producer. The <em>head</em> of the queue is that
22 * element that has been on the queue the longest time for some
23 * producer. The <em>tail</em> of the queue is that element that has
24 * been on the queue the shortest time for some producer.
25 *
26 * <p>Beware that, unlike in most collections, the {@code size}
27 * method is <em>NOT</em> a constant-time operation. Because of the
28 * asynchronous nature of these queues, determining the current number
29 * of elements requires a traversal of the elements.
30 *
31 * <p>This class and its iterator implement all of the
32 * <em>optional</em> methods of the {@link Collection} and {@link
33 * Iterator} interfaces.
34 *
35 * <p>Memory consistency effects: As with other concurrent
36 * collections, actions in a thread prior to placing an object into a
37 * {@code LinkedTransferQueue}
38 * <a href="package-summary.html#MemoryVisibility"><i>happen-before</i></a>
39 * actions subsequent to the access or removal of that element from
40 * the {@code LinkedTransferQueue} in another thread.
41 *
42 * <p>This class is a member of the
43 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
44 * Java Collections Framework</a>.
45 *
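* <p>A minimal usage sketch (an editor-added illustration, not part of
* the original documentation): one thread hands each task directly to
* a consumer, blocking in {@code transfer} until some consumer has
* received it:
*
* <pre> {@code
* class Dispatcher {
*   final LinkedTransferQueue<Runnable> queue =
*     new LinkedTransferQueue<Runnable>();
*
*   void produce(Runnable task) throws InterruptedException {
*     queue.transfer(task);   // blocks until a consumer receives it
*   }
*
*   void consume() throws InterruptedException {
*     for (;;)
*       queue.take().run();   // blocks until an element is available
*   }
* }}</pre>
*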
46 * @since 1.7
47 * @author Doug Lea
48 * @param <E> the type of elements held in this collection
49 */
50 public class LinkedTransferQueue<E> extends AbstractQueue<E>
51 implements TransferQueue<E>, java.io.Serializable {
52 private static final long serialVersionUID = -3223113410248163686L;
53
54 /*
55 * *** Overview of Dual Queues with Slack ***
56 *
57 * Dual Queues, introduced by Scherer and Scott
58 * (http://www.cs.rice.edu/~wns1/papers/2004-DISC-DDS.pdf) are
59 * (linked) queues in which nodes may represent either data or
60 * requests. When a thread tries to enqueue a data node, but
61 * encounters a request node, it instead "matches" and removes it;
62 * and vice versa for enqueuing requests. Blocking Dual Queues
63 * arrange that threads enqueuing unmatched requests block until
64 * other threads provide the match. Dual Synchronous Queues (see
65 * Scherer, Lea, & Scott
66 * http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf)
67 * additionally arrange that threads enqueuing unmatched data also
68 * block. Dual Transfer Queues support all of these modes, as
69 * dictated by callers.
70 *
71 * A FIFO dual queue may be implemented using a variation of the
72 * Michael & Scott (M&S) lock-free queue algorithm
73 * (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf).
74 * It maintains two pointer fields, "head", pointing to a
75 * (matched) node that in turn points to the first actual
76 * (unmatched) queue node (or null if empty); and "tail" that
77 * points to the last node on the queue (or again null if
78 * empty). For example, here is a possible queue with four data
79 * elements:
80 *
81 * head tail
82 * | |
83 * v v
84 * M -> U -> U -> U -> U
85 *
86 * The M&S queue algorithm is known to be prone to scalability and
87 * overhead limitations when maintaining (via CAS) these head and
88 * tail pointers. This has led to the development of
89 * contention-reducing variants such as elimination arrays (see
90 * Moir et al http://portal.acm.org/citation.cfm?id=1074013) and
91 * optimistic back pointers (see Ladan-Mozes & Shavit
92 * http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf).
93 * However, the nature of dual queues enables a simpler tactic for
94 * improving M&S-style implementations when dual-ness is needed.
95 *
96 * In a dual queue, each node must atomically maintain its match
97 * status. While there are other possible variants, we implement
98 * this here as: for a data-mode node, matching entails CASing an
99 * "item" field from a non-null data value to null upon match, and
100 * vice-versa for request nodes, CASing from null to a data
101 * value. (Note that the linearization properties of this style of
102 * queue are easy to verify -- elements are made available by
103 * linking, and unavailable by matching.) Compared to plain M&S
104 * queues, this property of dual queues requires one additional
105 * successful atomic operation per enq/deq pair. But it also
106 * enables lower cost variants of queue maintenance mechanics. (A
107 * variation of this idea applies even for non-dual queues that
108 * support deletion of interior elements, such as
109 * j.u.c.ConcurrentLinkedQueue.)
110 *
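* An editor-added sketch of that convention, mirroring the test used
* in method xfer below (p is some node, and e is the value being
* offered, or null for a take):
*
*   Object item = p.item;          // null iff p is an unmatched request
*   boolean unmatched = (item != p) && ((item != null) == p.isData);
*   boolean matched   = unmatched && p.casItem(item, e);
*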
111 * Once a node is matched, its match status can never again
112 * change. We may thus arrange that the linked list of them
113 * contain a prefix of zero or more matched nodes, followed by a
114 * suffix of zero or more unmatched nodes. (Note that we allow
115 * both the prefix and suffix to be zero length, which in turn
116 * means that we do not use a dummy header.) If we were not
117 * concerned with either time or space efficiency, we could
118 * correctly perform enqueue and dequeue operations by traversing
119 * from a pointer to the initial node; CASing the item of the
120 * first unmatched node on match and CASing the next field of the
121 * trailing node on appends. (Plus some special-casing when
122 * initially empty). While this would be a terrible idea in
123 * itself, it does have the benefit of not requiring ANY atomic
124 * updates on head/tail fields.
125 *
126 * We introduce here an approach that lies between the extremes of
127 * never versus always updating queue (head and tail) pointers.
128 * This offers a tradeoff between sometimes requiring extra
129 * traversal steps to locate the first and/or last unmatched
130 * nodes, versus the reduced overhead and contention of fewer
131 * updates to queue pointers. For example, a possible snapshot of
132 * a queue is:
133 *
134 * head tail
135 * | |
136 * v v
137 * M -> M -> U -> U -> U -> U
138 *
139 * The best value for this "slack" (the targeted maximum distance
140 * between the value of "head" and the first unmatched node, and
141 * similarly for "tail") is an empirical matter. We have found
142 * that using very small constants in the range of 1-3 works best
143 * over a range of platforms. Larger values introduce increasing
144 * costs of cache misses and risks of long traversal chains, while
145 * smaller values increase CAS contention and overhead.
146 *
147 * Dual queues with slack differ from plain M&S dual queues by
148 * virtue of only sometimes updating head or tail pointers when
149 * matching, appending, or even traversing nodes, in order to
150 * maintain a targeted slack. The idea of "sometimes" may be
151 * operationalized in several ways. The simplest is to use a
152 * per-operation counter incremented on each traversal step, and
153 * to try (via CAS) to update the associated queue pointer
154 * whenever the count exceeds a threshold. Another, that requires
155 * more overhead, is to use random number generators to update
156 * with a given probability per traversal step.
157 *
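* An editor-added sketch of the counter-based variant (this class
* instead hard-wires a threshold of two; see the implementation
* overview below; THRESHOLD here is notional):
*
*   int steps = 0;
*   Node h = head, p = h;
*   while (p != null && p.isMatched() && (p = p.next) != null) {
*     if (++steps > THRESHOLD) {
*       if (casHead(h, p))           // try to advance head
*         h = p;
*       else
*         h = head;                  // lost a race; refresh
*       steps = 0;
*     }
*   }
*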
158 * In any strategy along these lines, because CASes updating
159 * fields may fail, the actual slack may exceed targeted
160 * slack. However, they may be retried at any time to maintain
161 * targets. Even when using very small slack values, this
162 * approach works well for dual queues because it allows all
163 * operations up to the point of matching or appending an item
164 * (hence potentially allowing progress by another thread) to be
165 * read-only, thus not introducing any further contention. As
166 * described below, we implement this by performing slack
167 * maintenance retries only after these points.
168 *
169 * As an accompaniment to such techniques, traversal overhead can
170 * be further reduced without increasing contention of head
171 * pointer updates: Threads may sometimes shortcut the "next" link
172 * path from the current "head" node to be closer to the currently
173 * known first unmatched node, and similarly for tail. Again, this
174 * may be triggered using thresholds or randomization.
175 *
176 * These ideas must be further extended to avoid unbounded amounts
177 * of costly-to-reclaim garbage caused by the sequential "next"
178 * links of nodes starting at old forgotten head nodes: As first
179 * described in detail by Boehm
180 * (http://portal.acm.org/citation.cfm?doid=503272.503282) if a GC
181 * delays noticing that any arbitrarily old node has become
182 * garbage, all newer dead nodes will also be unreclaimed.
183 * (Similar issues arise in non-GC environments.) To cope with
184 * this in our implementation, upon CASing to advance the head
185 * pointer, we set the "next" link of the previous head to point
186 * only to itself; thus limiting the length of connected dead lists.
187 * (We also take similar care to wipe out possibly garbage
188 * retaining values held in other Node fields.) However, doing so
189 * adds some further complexity to traversal: If any "next"
190 * pointer links to itself, it indicates that the current thread
191 * has lagged behind a head-update, and so the traversal must
192 * continue from the "head". Traversals trying to find the
193 * current tail starting from "tail" may also encounter
194 * self-links, in which case they also continue at "head".
195 *
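* In code, the pattern used by the traversal loops below is
* (editor-added excerpt):
*
*   Node n = p.next;
*   p = (p != n) ? n : head;     // p links to itself: restart at head
*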
196 * It is tempting in a slack-based scheme to not even use CAS for
197 * updates (similarly to Ladan-Mozes & Shavit). However, this
198 * cannot be done for head updates under the above link-forgetting
199 * mechanics because an update may leave head at a detached node.
200 * And while direct writes are possible for tail updates, they
201 * increase the risk of long retraversals, and hence long garbage
202 * chains, which can be much more costly than is worthwhile
203 * considering that the cost difference of performing a CAS vs
204 * write is smaller when they are not triggered on each operation
205 * (especially considering that writes and CASes equally require
206 * additional GC bookkeeping ("write barriers") that are sometimes
207 * more costly than the writes themselves because of contention).
208 *
209 * Removal of interior nodes (due to timed out or interrupted
210 * waits, or calls to remove(x) or Iterator.remove) can use a
211 * scheme roughly similar to that described in Scherer, Lea, and
212 * Scott's SynchronousQueue. Given a predecessor, we can unsplice
213 * any node except the (actual) tail of the queue. To avoid
214 * build-up of cancelled trailing nodes, upon a request to remove
215 * a trailing node, it is placed in field "cleanMe" to be
216 * unspliced upon the next call to unsplice any other node.
217 * Situations needing such mechanics are not common but do occur
218 * in practice; for example when an unbounded series of short
219 * timed calls to poll repeatedly time out but never otherwise
220 * fall off the list because of an untimed call to take at the
221 * front of the queue. Note that maintaining field cleanMe does
222 * not otherwise much impact garbage retention even if never
223 * cleared by some other call because the held node will
224 * eventually either directly or indirectly lead to a self-link
225 * once off the list.
226 *
227 * *** Overview of implementation ***
228 *
229 * We use a threshold-based approach to updates, with a slack
230 * threshold of two -- that is, we update head/tail when the
231 * current pointer appears to be two or more steps away from the
232 * first/last node. The slack value is hard-wired: a path greater
233 * than one is naturally implemented by checking equality of
234 * traversal pointers except when the list has only one element,
235 * in which case we keep the slack threshold at one. Avoiding tracking
236 * explicit counts across method calls slightly simplifies an
237 * already-messy implementation. Using randomization would
238 * probably work better if there were a low-quality dirt-cheap
239 * per-thread one available, but even ThreadLocalRandom is too
240 * heavy for these purposes.
241 *
242 * With such a small slack threshold value, it is rarely
243 * worthwhile to augment this with path short-circuiting; i.e.,
244 * unsplicing nodes between head and the first unmatched node, or
245 * similarly for tail, rather than advancing head or tail
246 * proper. However, it is used (in awaitMatch) immediately before
247 * a waiting thread starts to block, as a final bit of helping at
248 * a point when contention with others is extremely unlikely
249 * (since if other threads that could release it are operating,
250 * then the current thread wouldn't be blocking).
251 *
252 * We allow both the head and tail fields to be null before any
253 * nodes are enqueued; initializing upon first append. This
254 * simplifies some other logic, as well as providing more
255 * efficient explicit control paths instead of letting JVMs insert
256 * implicit NullPointerExceptions when they are null. While not
257 * currently fully implemented, we also leave open the possibility
258 * of re-nulling these fields when empty (which is complicated to
259 * arrange, for little benefit.)
260 *
261 * All enqueue/dequeue operations are handled by the single method
262 * "xfer" with parameters indicating whether to act as some form
263 * of offer, put, poll, take, or transfer (each possibly with
264 * timeout). The relative complexity of using one monolithic
265 * method is outweighed by the code bulk and maintenance problems
266 * that would come from using separate methods for each case.
267 *
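* For orientation, the public methods defined further below reduce to
* xfer calls roughly as follows (editor-added summary):
*
*   put(e), offer(e), add(e)   ->  xfer(e,    true,  ASYNC,   0)
*   tryTransfer(e)             ->  xfer(e,    true,  NOW,     0)
*   transfer(e)                ->  xfer(e,    true,  SYNC,    0)
*   tryTransfer(e, time, unit) ->  xfer(e,    true,  TIMEOUT, nanos)
*   poll()                     ->  xfer(null, false, NOW,     0)
*   take()                     ->  xfer(null, false, SYNC,    0)
*   poll(time, unit)           ->  xfer(null, false, TIMEOUT, nanos)
*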
268 * Operation consists of up to three phases. The first is
269 * implemented within method xfer, the second in tryAppend, and
270 * the third in method awaitMatch.
271 *
272 * 1. Try to match an existing node
273 *
274 * Starting at head, skip already-matched nodes until finding
275 * an unmatched node of opposite mode, if one exists; in that
276 * case match it and return, also if necessary updating
277 * head to one past the matched node (or the node itself if the
278 * list has no other unmatched nodes). If the CAS misses, then
279 * a loop retries advancing head by two steps until either
280 * success or the slack is at most two. By requiring that each
281 * attempt advances head by two (if applicable), we ensure that
282 * the slack does not grow without bound. Traversals also check
283 * if the initial head is now off-list, in which case they
284 * start at the new head.
285 *
286 * If no candidates are found and the call was untimed
287 * poll/offer, (argument "how" is NOW) return.
288 *
289 * 2. Try to append a new node (method tryAppend)
290 *
291 * Starting at current tail pointer, find the actual last node
292 * and try to append a new node (or if head was null, establish
293 * the first node). Nodes can be appended only if their
294 * predecessors are either already matched or are of the same
295 * mode. If we detect otherwise, then a new node with opposite
296 * mode must have been appended during traversal, so we must
297 * restart at phase 1. The traversal and update steps are
298 * otherwise similar to phase 1: Retrying upon CAS misses and
299 * checking for staleness. In particular, if a self-link is
300 * encountered, then we can safely jump to a node on the list
301 * by continuing the traversal at current head.
302 *
303 * On successful append, if the call was ASYNC, return.
304 *
305 * 3. Await match or cancellation (method awaitMatch)
306 *
307 * Wait for another thread to match the node, cancelling instead
308 * if the current thread is interrupted or the wait times out. On
309 * multiprocessors, we use front-of-queue spinning: If a node
310 * appears to be the first unmatched node in the queue, it
311 * spins a bit before blocking. In either case, before blocking
312 * it tries to unsplice any nodes between the current "head"
313 * and the first unmatched node.
314 *
315 * Front-of-queue spinning vastly improves performance of
316 * heavily contended queues. And so long as it is relatively
317 * brief and "quiet", spinning does not much impact performance
318 * of less-contended queues. During spins threads check their
319 * interrupt status and generate a thread-local random number
320 * to decide to occasionally perform a Thread.yield. While
321 * yield has underdefined specs, we assume that it might help,
322 * and will not hurt in limiting impact of spinning on busy
323 * systems. We also use smaller (1/2) spins for nodes that are
324 * not known to be front but whose predecessors have not
325 * blocked -- these "chained" spins avoid artifacts of
326 * front-of-queue rules which otherwise lead to alternating
327 * nodes spinning vs blocking. Further, front threads that
328 * represent phase changes (from data to request node or vice
329 * versa) compared to their predecessors receive additional
330 * chained spins, reflecting longer paths typically required to
331 * unblock threads during phase changes.
332 */
333
334 /** True if on multiprocessor */
335 private static final boolean MP =
336 Runtime.getRuntime().availableProcessors() > 1;
337
338 /**
339 * The number of times to spin (with randomly interspersed calls
340 * to Thread.yield) on multiprocessors before blocking when a node
341 * is apparently the first waiter in the queue. See above for
342 * explanation. Must be a power of two. The value is empirically
343 * derived -- it works pretty well across a variety of processors,
344 * numbers of CPUs, and OSes.
345 */
346 private static final int FRONT_SPINS = 1 << 7;
347
348 /**
349 * The number of times to spin before blocking when a node is
350 * preceded by another node that is apparently spinning. Also
351 * serves as an increment to FRONT_SPINS on phase changes, and as
352 * base average frequency for yielding during spins. Must be a
353 * power of two.
354 */
355 private static final int CHAINED_SPINS = FRONT_SPINS >>> 1;
356
357 /**
358 * Queue nodes. Uses Object, not E, for items to allow forgetting
359 * them after use. Relies heavily on Unsafe mechanics to minimize
360 * unnecessary ordering constraints: Writes that intrinsically
361 * precede or follow CASes use simple relaxed forms. Other
362 * cleanups use releasing/lazy writes.
363 */
364 static final class Node {
365 final boolean isData; // false if this is a request node
366 volatile Object item; // initially non-null if isData; CASed to match
367 volatile Node next;
368 volatile Thread waiter; // null until waiting
369
370 // CAS methods for fields
371 final boolean casNext(Node cmp, Node val) {
372 return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
373 }
374
375 final boolean casItem(Object cmp, Object val) {
376 return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
377 }
378
379 /**
380 * Creates a new node. Uses relaxed write because item can only
381 * be seen if followed by CAS.
382 */
383 Node(Object item, boolean isData) {
384 UNSAFE.putObject(this, itemOffset, item); // relaxed write
385 this.isData = isData;
386 }
387
388 /**
389 * Links node to itself to avoid garbage retention. Called
390 * only after CASing head field, so uses relaxed write.
391 */
392 final void forgetNext() {
393 UNSAFE.putObject(this, nextOffset, this);
394 }
395
396 /**
397 * Sets item to self (using a releasing/lazy write) and waiter
398 * to null, to avoid garbage retention after extracting or
399 * cancelling.
400 */
401 final void forgetContents() {
402 UNSAFE.putOrderedObject(this, itemOffset, this);
403 UNSAFE.putOrderedObject(this, waiterOffset, null);
404 }
405
406 /**
407 * Returns true if this node has been matched, including the
408 * case of artificial matches due to cancellation.
409 */
410 final boolean isMatched() {
411 Object x = item;
412 return x == this || (x != null) != isData;
413 }
414
415 /**
416 * Returns true if a node with the given mode cannot be
417 * appended to this node because this node is unmatched and
418 * has opposite data mode.
419 */
420 final boolean cannotPrecede(boolean haveData) {
421 boolean d = isData;
422 Object x;
423 return d != haveData && (x = item) != this && (x != null) == d;
424 }
425
426 /**
427 * Tries to artificially match a data node -- used by remove.
428 */
429 final boolean tryMatchData() {
430 Object x = item;
431 if (x != null && x != this && casItem(x, null)) {
432 LockSupport.unpark(waiter);
433 return true;
434 }
435 return false;
436 }
437
438 // Unsafe mechanics
439 private static final sun.misc.Unsafe UNSAFE = getUnsafe();
440 private static final long nextOffset =
441 objectFieldOffset(UNSAFE, "next", Node.class);
442 private static final long itemOffset =
443 objectFieldOffset(UNSAFE, "item", Node.class);
444 private static final long waiterOffset =
445 objectFieldOffset(UNSAFE, "waiter", Node.class);
446
447 private static final long serialVersionUID = -3375979862319811754L;
448 }
449
450 /** head of the queue; null until first enqueue */
451 private transient volatile Node head;
452
453 /** predecessor of dangling unspliceable node */
454 private transient volatile Node cleanMe; // decl here to reduce contention
455
456 /** tail of the queue; null until first append */
457 private transient volatile Node tail;
458
459 // CAS methods for fields
460 private boolean casTail(Node cmp, Node val) {
461 return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val);
462 }
463
464 private boolean casHead(Node cmp, Node val) {
465 return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val);
466 }
467
468 private boolean casCleanMe(Node cmp, Node val) {
469 return UNSAFE.compareAndSwapObject(this, cleanMeOffset, cmp, val);
470 }
471
472 /*
473 * Possible values for "how" argument in xfer method. Beware that
474 * the order of assigned numerical values matters.
475 */
476 private static final int NOW = 0; // for untimed poll, tryTransfer
477 private static final int ASYNC = 1; // for offer, put, add
478 private static final int SYNC = 2; // for transfer, take
479 private static final int TIMEOUT = 3; // for timed poll, tryTransfer
480
481 /**
482 * Implements all queuing methods. See above for explanation.
483 *
484 * @param e the item or null for take
485 * @param haveData true if this is a put, else a take
486 * @param how NOW, ASYNC, SYNC, or TIMEOUT
487 * @param nanos timeout in nanosecs, used only if mode is TIMEOUT
488 * @return an item if matched, else e
489 * @throws NullPointerException if haveData mode but e is null
490 */
491 private Object xfer(Object e, boolean haveData, int how, long nanos) {
492 if (haveData && (e == null))
493 throw new NullPointerException();
494 Node s = null; // the node to append, if needed
495
496 retry: for (;;) { // restart on append race
497
498 for (Node h = head, p = h; p != null;) { // find & match first node
499 boolean isData = p.isData;
500 Object item = p.item;
501 if (item != p && (item != null) == isData) { // unmatched
502 if (isData == haveData) // can't match
503 break;
504 if (p.casItem(item, e)) { // match
505 for (Node q = p; q != h;) {
506 Node n = q.next; // update head by 2
507 if (n != null) // unless singleton
508 q = n;
509 if (head == h && casHead(h, q)) {
510 h.forgetNext();
511 break;
512 } // advance and retry
513 if ((h = head) == null ||
514 (q = h.next) == null || !q.isMatched())
515 break; // unless slack < 2
516 }
517 LockSupport.unpark(p.waiter);
518 return item;
519 }
520 }
521 Node n = p.next;
522 p = (p != n) ? n : (h = head); // Use head if p offlist
523 }
524
525 if (how >= ASYNC) { // No matches available
526 if (s == null)
527 s = new Node(e, haveData);
528 Node pred = tryAppend(s, haveData);
529 if (pred == null)
530 continue retry; // lost race vs opposite mode
531 if (how >= SYNC)
532 return awaitMatch(s, pred, e, how, nanos);
533 }
534 return e; // not waiting
535 }
536 }
537
538 /**
539 * Tries to append node s as tail.
540 *
541 * @param s the node to append
542 * @param haveData true if appending in data mode
543 * @return null on failure due to losing race with append in
544 * different mode, else s's predecessor, or s itself if no
545 * predecessor
546 */
547 private Node tryAppend(Node s, boolean haveData) {
548 for (Node t = tail, p = t;;) { // move p to last node and append
549 Node n, u; // temps for reads of next & tail
550 if (p == null && (p = head) == null) {
551 if (casHead(null, s))
552 return s; // initialize
553 }
554 else if (p.cannotPrecede(haveData))
555 return null; // lost race vs opposite mode
556 else if ((n = p.next) != null) // not last; keep traversing
557 p = p != t && t != (u = tail) ? (t = u) : // stale tail
558 (p != n) ? n : null; // restart if off list
559 else if (!p.casNext(null, s))
560 p = p.next; // re-read on CAS failure
561 else {
562 if (p != t) { // update if slack now >= 2
563 while ((tail != t || !casTail(t, s)) &&
564 (t = tail) != null &&
565 (s = t.next) != null && // advance and retry
566 (s = s.next) != null && s != t);
567 }
568 return p;
569 }
570 }
571 }
572
573 /**
574 * Spins/yields/blocks until node s is matched or caller gives up.
575 *
576 * @param s the waiting node
577 * @param pred the predecessor of s, or s itself if it has no
578 * predecessor, or null if unknown (the null case does not occur
579 * in any current calls but may in possible future extensions)
580 * @param e the comparison value for checking match
581 * @param how either SYNC or TIMEOUT
582 * @param nanos timeout value
583 * @return matched item, or e if unmatched on interrupt or timeout
584 */
585 private Object awaitMatch(Node s, Node pred, Object e,
586 int how, long nanos) {
587 long lastTime = (how == TIMEOUT) ? System.nanoTime() : 0L;
588 Thread w = Thread.currentThread();
589 int spins = -1; // initialized after first item and cancel checks
590 ThreadLocalRandom randomYields = null; // bound if needed
591
592 for (;;) {
593 Object item = s.item;
594 if (item != e) { // matched
595 s.forgetContents(); // avoid garbage
596 return item;
597 }
598 if ((w.isInterrupted() || (how == TIMEOUT && nanos <= 0)) &&
599 s.casItem(e, s)) { // cancel
600 unsplice(pred, s);
601 return e;
602 }
603
604 if (spins < 0) { // establish spins at/near front
605 if ((spins = spinsFor(pred, s.isData)) > 0)
606 randomYields = ThreadLocalRandom.current();
607 }
608 else if (spins > 0) { // spin
609 if (--spins == 0)
610 shortenHeadPath(); // reduce slack before blocking
611 else if (randomYields.nextInt(CHAINED_SPINS) == 0)
612 Thread.yield(); // occasionally yield
613 }
614 else if (s.waiter == null) {
615 s.waiter = w; // request unpark then recheck
616 }
617 else if (how == TIMEOUT) {
618 long now = System.nanoTime();
619 if ((nanos -= now - lastTime) > 0)
620 LockSupport.parkNanos(this, nanos);
621 lastTime = now;
622 }
623 else {
624 LockSupport.park(this);
625 s.waiter = null;
626 spins = -1; // spin if front upon wakeup
627 }
628 }
629 }
630
631 /**
632 * Returns spin/yield value for a node with given predecessor and
633 * data mode. See above for explanation.
634 */
635 private static int spinsFor(Node pred, boolean haveData) {
636 if (MP && pred != null) {
637 if (pred.isData != haveData) // phase change
638 return FRONT_SPINS + CHAINED_SPINS;
639 if (pred.isMatched()) // probably at front
640 return FRONT_SPINS;
641 if (pred.waiter == null) // pred apparently spinning
642 return CHAINED_SPINS;
643 }
644 return 0;
645 }
646
647 /**
648 * Tries (once) to unsplice nodes between head and first unmatched
649 * or trailing node; failing on contention.
650 */
651 private void shortenHeadPath() {
652 Node h, hn, p, q;
653 if ((p = h = head) != null && h.isMatched() &&
654 (q = hn = h.next) != null) {
655 Node n;
656 while ((n = q.next) != q) {
657 if (n == null || !q.isMatched()) {
658 if (hn != q && h.next == hn)
659 h.casNext(hn, q);
660 break;
661 }
662 p = q;
663 q = n;
664 }
665 }
666 }
667
668 /* -------------- Traversal methods -------------- */
669
670 /**
671 * Returns the first unmatched node of the given mode, or null if
672 * none. Used by methods isEmpty, hasWaitingConsumer.
673 */
674 private Node firstOfMode(boolean data) {
675 for (Node p = head; p != null; ) {
676 if (!p.isMatched())
677 return (p.isData == data) ? p : null;
678 Node n = p.next;
679 p = (n != p) ? n : head;
680 }
681 return null;
682 }
683
684 /**
685 * Returns the item in the first unmatched node with isData; or
686 * null if none. Used by peek.
687 */
688 private Object firstDataItem() {
689 for (Node p = head; p != null; ) {
690 boolean isData = p.isData;
691 Object item = p.item;
692 if (item != p && (item != null) == isData)
693 return isData ? item : null;
694 Node n = p.next;
695 p = (n != p) ? n : head;
696 }
697 return null;
698 }
699
700 /**
701 * Traverses and counts unmatched nodes of the given mode.
702 * Used by methods size and getWaitingConsumerCount.
703 */
704 private int countOfMode(boolean data) {
705 int count = 0;
706 for (Node p = head; p != null; ) {
707 if (!p.isMatched()) {
708 if (p.isData != data)
709 return 0;
710 if (++count == Integer.MAX_VALUE) // saturated
711 break;
712 }
713 Node n = p.next;
714 if (n != p)
715 p = n;
716 else {
717 count = 0;
718 p = head;
719 }
720 }
721 return count;
722 }
723
724 final class Itr implements Iterator<E> {
725 private Node nextNode; // next node to return item for
726 private Object nextItem; // the corresponding item
727 private Node lastRet; // last returned node, to support remove
728
729 /**
730 * Moves to next node after prev, or first node if prev null.
731 */
732 private void advance(Node prev) {
733 lastRet = prev;
734 Node p;
735 if (prev == null || (p = prev.next) == prev)
736 p = head;
737 while (p != null) {
738 Object item = p.item;
739 if (p.isData) {
740 if (item != null && item != p) {
741 nextItem = item;
742 nextNode = p;
743 return;
744 }
745 }
746 else if (item == null)
747 break;
748 Node n = p.next;
749 p = (n != p) ? n : head;
750 }
751 nextNode = null;
752 }
753
754 Itr() {
755 advance(null);
756 }
757
758 public final boolean hasNext() {
759 return nextNode != null;
760 }
761
762 public final E next() {
763 Node p = nextNode;
764 if (p == null) throw new NoSuchElementException();
765 Object e = nextItem;
766 advance(p);
767 return (E) e;
768 }
769
770 public final void remove() {
771 Node p = lastRet;
772 if (p == null) throw new IllegalStateException();
773 lastRet = null;
774 findAndRemoveNode(p);
775 }
776 }
777
778 /* -------------- Removal methods -------------- */
779
780 /**
781 * Unsplices (now or later) the given deleted/cancelled node with
782 * the given predecessor.
783 *
784 * @param pred predecessor of node to be unspliced
785 * @param s the node to be unspliced
786 */
787 private void unsplice(Node pred, Node s) {
788 s.forgetContents(); // clear unneeded fields
789 /*
790 * At any given time, exactly one node on list cannot be
791 * unlinked -- the last inserted node. To accommodate this, if
792 * we cannot unlink s, we save its predecessor as "cleanMe",
793 * processing the previously saved version first. Because only
794 * one node in the list can have a null next, at least one of
795 * node s or the node previously saved can always be
796 * processed, so this always terminates.
797 */
798 if (pred != null && pred != s) {
799 while (pred.next == s) {
800 Node oldpred = (cleanMe == null) ? null : reclean();
801 Node n = s.next;
802 if (n != null) {
803 if (n != s)
804 pred.casNext(s, n);
805 break;
806 }
807 if (oldpred == pred || // Already saved
808 (oldpred == null && casCleanMe(null, pred)))
809 break; // Postpone cleaning
810 }
811 }
812 }
813
814 /**
815 * Tries to unsplice the deleted/cancelled node held in cleanMe
816 * that was previously uncleanable because it was at tail.
817 *
818 * @return current cleanMe node (or null)
819 */
820 private Node reclean() {
821 /*
822 * cleanMe is, or at one time was, predecessor of a cancelled
823 * node s that was the tail so could not be unspliced. If it
824 * is no longer the tail, try to unsplice if necessary and
825 * make cleanMe slot available. This differs from similar
826 * code in unsplice() because we must check that pred still
827 * points to a matched node that can be unspliced -- if not,
828 * we can (must) clear cleanMe without unsplicing. This can
829 * loop only due to contention.
830 */
831 Node pred;
832 while ((pred = cleanMe) != null) {
833 Node s = pred.next;
834 Node n;
835 if (s == null || s == pred || !s.isMatched())
836 casCleanMe(pred, null); // already gone
837 else if ((n = s.next) != null) {
838 if (n != s)
839 pred.casNext(s, n);
840 casCleanMe(pred, null);
841 }
842 else
843 break;
844 }
845 return pred;
846 }
847
848 /**
849 * Main implementation of Iterator.remove(). Finds
850 * and unsplices the given node.
851 */
852 final void findAndRemoveNode(Node s) {
853 if (s.tryMatchData()) {
854 Node pred = null;
855 Node p = head;
856 while (p != null) {
857 if (p == s) {
858 unsplice(pred, p);
859 break;
860 }
861 if (!p.isData && !p.isMatched())
862 break;
863 pred = p;
864 if ((p = p.next) == pred) { // stale
865 pred = null;
866 p = head;
867 }
868 }
869 }
870 }
871
872 /**
873 * Main implementation of remove(Object)
874 */
875 private boolean findAndRemove(Object e) {
876 if (e != null) {
877 Node pred = null;
878 Node p = head;
879 while (p != null) {
880 Object item = p.item;
881 if (p.isData) {
882 if (item != null && item != p && e.equals(item) &&
883 p.tryMatchData()) {
884 unsplice(pred, p);
885 return true;
886 }
887 }
888 else if (item == null)
889 break;
890 pred = p;
891 if ((p = p.next) == pred) {
892 pred = null;
893 p = head;
894 }
895 }
896 }
897 return false;
898 }
899
900
901 /**
902 * Creates an initially empty {@code LinkedTransferQueue}.
903 */
904 public LinkedTransferQueue() {
905 }
906
907 /**
908 * Creates a {@code LinkedTransferQueue}
909 * initially containing the elements of the given collection,
910 * added in traversal order of the collection's iterator.
911 *
912 * @param c the collection of elements to initially contain
913 * @throws NullPointerException if the specified collection or any
914 * of its elements are null
915 */
916 public LinkedTransferQueue(Collection<? extends E> c) {
917 this();
918 addAll(c);
919 }
920
921 /**
922 * Inserts the specified element at the tail of this queue.
923 * As the queue is unbounded, this method will never block.
924 *
925 * @throws NullPointerException if the specified element is null
926 */
927 public void put(E e) {
928 xfer(e, true, ASYNC, 0);
929 }
930
931 /**
932 * Inserts the specified element at the tail of this queue.
933 * As the queue is unbounded, this method will never block or
934 * return {@code false}.
935 *
936 * @return {@code true} (as specified by
937 * {@link BlockingQueue#offer(Object,long,TimeUnit) BlockingQueue.offer})
938 * @throws NullPointerException if the specified element is null
939 */
940 public boolean offer(E e, long timeout, TimeUnit unit) {
941 xfer(e, true, ASYNC, 0);
942 return true;
943 }
944
945 /**
946 * Inserts the specified element at the tail of this queue.
947 * As the queue is unbounded, this method will never return {@code false}.
948 *
949 * @return {@code true} (as specified by
950 * {@link BlockingQueue#offer(Object) BlockingQueue.offer})
951 * @throws NullPointerException if the specified element is null
952 */
953 public boolean offer(E e) {
954 xfer(e, true, ASYNC, 0);
955 return true;
956 }
957
958 /**
959 * Inserts the specified element at the tail of this queue.
960 * As the queue is unbounded, this method will never throw
961 * {@link IllegalStateException} or return {@code false}.
962 *
963 * @return {@code true} (as specified by {@link Collection#add})
964 * @throws NullPointerException if the specified element is null
965 */
966 public boolean add(E e) {
967 xfer(e, true, ASYNC, 0);
968 return true;
969 }
970
971 /**
972 * Transfers the element to a waiting consumer immediately, if possible.
973 *
974 * <p>More precisely, transfers the specified element immediately
975 * if there exists a consumer already waiting to receive it (in
976 * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
977 * otherwise returning {@code false} without enqueuing the element.
978 *
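* <p>For example (editor-added sketch, where {@code queue} is some
* {@code LinkedTransferQueue}):
* <pre> {@code
* if (!queue.tryTransfer(task))
*   queue.put(task);  // no consumer was waiting; enqueue for later}</pre>
*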
979 * @return {@code true} if the element was transferred, else
*         {@code false}
* @throws NullPointerException if the specified element is null
980 */
981 public boolean tryTransfer(E e) {
982 return xfer(e, true, NOW, 0) == null;
983 }
984
985 /**
986 * Transfers the element to a consumer, waiting if necessary to do so.
987 *
988 * <p>More precisely, transfers the specified element immediately
989 * if there exists a consumer already waiting to receive it (in
990 * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
991 * else inserts the specified element at the tail of this queue
992 * and waits until the element is received by a consumer.
993 *
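* <p>For example (editor-added sketch, where {@code queue} is some
* {@code LinkedTransferQueue}):
* <pre> {@code
* queue.transfer(result);  // returns only after a consumer receives result}</pre>
*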
994 * @throws NullPointerException if the specified element is null
* @throws InterruptedException if interrupted while waiting, in which
*         case the element is not left enqueued
995 */
996 public void transfer(E e) throws InterruptedException {
997 if (xfer(e, true, SYNC, 0) != null) {
998 Thread.interrupted(); // failure possible only due to interrupt
999 throw new InterruptedException();
1000 }
1001 }
1002
1003 /**
1004 * Transfers the element to a consumer if it is possible to do so
1005 * before the timeout elapses.
1006 *
1007 * <p>More precisely, transfers the specified element immediately
1008 * if there exists a consumer already waiting to receive it (in
1009 * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
1010 * else inserts the specified element at the tail of this queue
1011 * and waits until the element is received by a consumer,
1012 * returning {@code false} if the specified wait time elapses
1013 * before the element can be transferred.
1014 *
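* <p>For example (editor-added sketch; {@code handleOverflow} is a
* hypothetical fallback action):
* <pre> {@code
* if (!queue.tryTransfer(msg, 50L, TimeUnit.MILLISECONDS))
*   handleOverflow(msg);  // no consumer arrived within 50 ms}</pre>
*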
1015 * @return {@code true} if successful, or {@code false} if the
*         specified waiting time elapses before completion
* @throws NullPointerException if the specified element is null
* @throws InterruptedException if interrupted while waiting, in which
*         case the element is not left enqueued
1016 */
1017 public boolean tryTransfer(E e, long timeout, TimeUnit unit)
1018 throws InterruptedException {
1019 if (xfer(e, true, TIMEOUT, unit.toNanos(timeout)) == null)
1020 return true;
1021 if (!Thread.interrupted())
1022 return false;
1023 throw new InterruptedException();
1024 }
1025
1026 public E take() throws InterruptedException {
1027 Object e = xfer(null, false, SYNC, 0);
1028 if (e != null)
1029 return (E)e;
1030 Thread.interrupted();
1031 throw new InterruptedException();
1032 }
1033
1034 public E poll(long timeout, TimeUnit unit) throws InterruptedException {
1035 Object e = xfer(null, false, TIMEOUT, unit.toNanos(timeout));
1036 if (e != null || !Thread.interrupted())
1037 return (E)e;
1038 throw new InterruptedException();
1039 }
1040
1041 public E poll() {
1042 return (E)xfer(null, false, NOW, 0);
1043 }
1044
1045 /**
1046 * @throws NullPointerException {@inheritDoc}
1047 * @throws IllegalArgumentException {@inheritDoc}
1048 */
1049 public int drainTo(Collection<? super E> c) {
1050 if (c == null)
1051 throw new NullPointerException();
1052 if (c == this)
1053 throw new IllegalArgumentException();
1054 int n = 0;
1055 E e;
1056 while ( (e = poll()) != null) {
1057 c.add(e);
1058 ++n;
1059 }
1060 return n;
1061 }
1062
1063 /**
1064 * @throws NullPointerException {@inheritDoc}
1065 * @throws IllegalArgumentException {@inheritDoc}
1066 */
1067 public int drainTo(Collection<? super E> c, int maxElements) {
1068 if (c == null)
1069 throw new NullPointerException();
1070 if (c == this)
1071 throw new IllegalArgumentException();
1072 int n = 0;
1073 E e;
1074 while (n < maxElements && (e = poll()) != null) {
1075 c.add(e);
1076 ++n;
1077 }
1078 return n;
1079 }
1080
1081 /**
1082 * Returns an iterator over the elements in this queue in proper
1083 * sequence, from head to tail.
1084 *
1085 * <p>The returned iterator is a "weakly consistent" iterator that
1086 * will never throw
1087 * {@link ConcurrentModificationException ConcurrentModificationException},
1088 * and guarantees to traverse elements as they existed upon
1089 * construction of the iterator, and may (but is not guaranteed
1090 * to) reflect any modifications subsequent to construction.
1091 *
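* <p>For example (editor-added sketch; {@code stringQueue} is some
* {@code LinkedTransferQueue<String>} and {@code process} a
* hypothetical per-element action), a traversal running concurrently
* with offers and polls never throws, though it may or may not
* observe those updates:
* <pre> {@code
* for (String s : stringQueue)  // no ConcurrentModificationException
*   process(s);}</pre>
*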
1092 * @return an iterator over the elements in this queue in proper sequence
1093 */
1094 public Iterator<E> iterator() {
1095 return new Itr();
1096 }
1097
1098 public E peek() {
1099 return (E) firstDataItem();
1100 }
1101
1102 /**
1103 * Returns {@code true} if this queue contains no elements.
1104 *
1105 * @return {@code true} if this queue contains no elements
1106 */
1107 public boolean isEmpty() {
1108 return firstOfMode(true) == null;
1109 }
1110
1111 public boolean hasWaitingConsumer() {
1112 return firstOfMode(false) != null;
1113 }
1114
1115 /**
1116 * Returns the number of elements in this queue. If this queue
1117 * contains more than {@code Integer.MAX_VALUE} elements, returns
1118 * {@code Integer.MAX_VALUE}.
1119 *
1120 * <p>Beware that, unlike in most collections, this method is
1121 * <em>NOT</em> a constant-time operation. Because of the
1122 * asynchronous nature of these queues, determining the current
1123 * number of elements requires an O(n) traversal.
1124 *
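* <p>For example (editor-added note), when only emptiness matters,
* {@code isEmpty()} is generally cheaper than {@code size() == 0},
* since it can stop at the first unmatched node:
* <pre> {@code
* if (!queue.isEmpty())   // does not traverse the whole queue
*   scheduleDrain();      // scheduleDrain is a hypothetical action}</pre>
*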
1125 * @return the number of elements in this queue
1126 */
1127 public int size() {
1128 return countOfMode(true);
1129 }
1130
1131 public int getWaitingConsumerCount() {
1132 return countOfMode(false);
1133 }
1134
1135 /**
1136 * Removes a single instance of the specified element from this queue,
1137 * if it is present. More formally, removes an element {@code e} such
1138 * that {@code o.equals(e)}, if this queue contains one or more such
1139 * elements.
1140 * Returns {@code true} if this queue contained the specified element
1141 * (or equivalently, if this queue changed as a result of the call).
1142 *
1143 * @param o element to be removed from this queue, if present
1144 * @return {@code true} if this queue changed as a result of the call
1145 */
1146 public boolean remove(Object o) {
1147 return findAndRemove(o);
1148 }
1149
1150 /**
1151 * Always returns {@code Integer.MAX_VALUE} because a
1152 * {@code LinkedTransferQueue} is not capacity constrained.
1153 *
1154 * @return {@code Integer.MAX_VALUE} (as specified by
1155 * {@link BlockingQueue#remainingCapacity()})
1156 */
1157 public int remainingCapacity() {
1158 return Integer.MAX_VALUE;
1159 }
1160
1161 /**
1162 * Saves the state to a stream (that is, serializes it).
1163 *
1164 * @serialData All of the elements (each an {@code E}) in
1165 * the proper order, followed by a null
1166 * @param s the stream
1167 */
1168 private void writeObject(java.io.ObjectOutputStream s)
1169 throws java.io.IOException {
1170 s.defaultWriteObject();
1171 for (E e : this)
1172 s.writeObject(e);
1173 // Use trailing null as sentinel
1174 s.writeObject(null);
1175 }
1176
1177 /**
1178 * Reconstitutes the Queue instance from a stream (that is,
1179 * deserializes it).
1180 *
1181 * @param s the stream
1182 */
1183 private void readObject(java.io.ObjectInputStream s)
1184 throws java.io.IOException, ClassNotFoundException {
1185 s.defaultReadObject();
1186 for (;;) {
1187 @SuppressWarnings("unchecked") E item = (E) s.readObject();
1188 if (item == null)
1189 break;
1190 else
1191 offer(item);
1192 }
1193 }
1194
1195
1196 // Unsafe mechanics
1197
1198 private static final sun.misc.Unsafe UNSAFE = getUnsafe();
1199 private static final long headOffset =
1200 objectFieldOffset(UNSAFE, "head", LinkedTransferQueue.class);
1201 private static final long tailOffset =
1202 objectFieldOffset(UNSAFE, "tail", LinkedTransferQueue.class);
1203 private static final long cleanMeOffset =
1204 objectFieldOffset(UNSAFE, "cleanMe", LinkedTransferQueue.class);
1205
1206 static long objectFieldOffset(sun.misc.Unsafe UNSAFE,
1207 String field, Class<?> klazz) {
1208 try {
1209 return UNSAFE.objectFieldOffset(klazz.getDeclaredField(field));
1210 } catch (NoSuchFieldException e) {
1211 // Convert Exception to corresponding Error
1212 NoSuchFieldError error = new NoSuchFieldError(field);
1213 error.initCause(e);
1214 throw error;
1215 }
1216 }
1217
1218 private static sun.misc.Unsafe getUnsafe() {
1219 try {
1220 return sun.misc.Unsafe.getUnsafe();
1221 } catch (SecurityException se) {
1222 try {
1223 return java.security.AccessController.doPrivileged
1224 (new java.security
1225 .PrivilegedExceptionAction<sun.misc.Unsafe>() {
1226 public sun.misc.Unsafe run() throws Exception {
1227 java.lang.reflect.Field f = sun.misc
1228 .Unsafe.class.getDeclaredField("theUnsafe");
1229 f.setAccessible(true);
1230 return (sun.misc.Unsafe) f.get(null);
1231 }});
1232 } catch (java.security.PrivilegedActionException e) {
1233 throw new RuntimeException("Could not initialize intrinsics",
1234 e.getCause());
1235 }
1236 }
1237 }
1238
1239 }