1 |
/* |
2 |
* Written by Doug Lea with assistance from members of JCP JSR-166 |
3 |
* Expert Group and released to the public domain, as explained at |
4 |
* http://creativecommons.org/licenses/publicdomain |
5 |
*/ |
6 |
|
7 |
package jsr166y; |
8 |
|
9 |
import java.util.concurrent.*; |
10 |
|
11 |
import java.util.AbstractQueue; |
12 |
import java.util.Collection; |
13 |
import java.util.ConcurrentModificationException; |
14 |
import java.util.Iterator; |
15 |
import java.util.NoSuchElementException; |
16 |
import java.util.Queue; |
17 |
import java.util.concurrent.locks.LockSupport; |
18 |
import java.util.concurrent.atomic.AtomicReference; |
19 |
|
20 |
/** |
21 |
* An unbounded {@linkplain TransferQueue} based on linked nodes. |
22 |
* This queue orders elements FIFO (first-in-first-out) with respect |
23 |
* to any given producer. The <em>head</em> of the queue is that |
24 |
* element that has been on the queue the longest time for some |
25 |
* producer. The <em>tail</em> of the queue is that element that has |
26 |
* been on the queue the shortest time for some producer. |
27 |
* |
28 |
* <p>Beware that, unlike in most collections, the {@code size} |
29 |
* method is <em>NOT</em> a constant-time operation. Because of the |
30 |
* asynchronous nature of these queues, determining the current number |
31 |
* of elements requires a traversal of the elements. |
32 |
* |
33 |
* <p>This class and its iterator implement all of the |
34 |
* <em>optional</em> methods of the {@link Collection} and {@link |
35 |
* Iterator} interfaces. |
36 |
* |
37 |
* <p>Memory consistency effects: As with other concurrent |
38 |
* collections, actions in a thread prior to placing an object into a |
39 |
* {@code LinkedTransferQueue} |
40 |
* <a href="package-summary.html#MemoryVisibility"><i>happen-before</i></a> |
41 |
* actions subsequent to the access or removal of that element from |
42 |
* the {@code LinkedTransferQueue} in another thread. |
43 |
* |
44 |
* <p>This class is a member of the |
45 |
* <a href="{@docRoot}/../technotes/guides/collections/index.html"> |
46 |
* Java Collections Framework</a>. |
47 |
* |
48 |
* @since 1.7 |
49 |
* @author Doug Lea |
50 |
* @param <E> the type of elements held in this collection |
51 |
*/ |
52 |
public class LinkedTransferQueue<E> extends AbstractQueue<E> |
53 |
implements TransferQueue<E>, java.io.Serializable { |
54 |
private static final long serialVersionUID = -3223113410248163686L; |
55 |
|
56 |
/* |
57 |
* This class extends the approach used in FIFO-mode |
58 |
* SynchronousQueues. See the internal documentation, as well as |
59 |
* the PPoPP 2006 paper "Scalable Synchronous Queues" by Scherer, |
60 |
* Lea & Scott |
61 |
* (http://www.cs.rice.edu/~wns1/papers/2006-PPoPP-SQ.pdf) |
62 |
* |
63 |
* The main extension is to provide different Wait modes for the |
64 |
* main "xfer" method that puts or takes items. These don't |
65 |
* impact the basic dual-queue logic, but instead control whether |
66 |
* or how threads block upon insertion of request or data nodes |
67 |
* into the dual queue. It also uses slightly different |
68 |
* conventions for tracking whether nodes are off-list or |
69 |
* cancelled. |
70 |
*/ |
71 |
|
72 |
    // Wait modes for xfer method
    static final int NOWAIT = 0;   // return immediately if no match available
    static final int TIMEOUT = 1;  // wait up to the given nanos for a match
    static final int WAIT = 2;     // wait indefinitely until matched

    /** The number of CPUs, for spin control */
    static final int NCPUS = Runtime.getRuntime().availableProcessors();

    /**
     * The number of times to spin before blocking in timed waits.
     * The value is empirically derived -- it works well across a
     * variety of processors and OSes. Empirically, the best value
     * seems not to vary with number of CPUs (beyond 2) so is just
     * a constant.
     */
    static final int maxTimedSpins = (NCPUS < 2) ? 0 : 32;

    /**
     * The number of times to spin before blocking in untimed waits.
     * This is greater than timed value because untimed waits spin
     * faster since they don't need to check times on each spin.
     */
    static final int maxUntimedSpins = maxTimedSpins * 16;

    /**
     * The number of nanoseconds for which it is faster to spin
     * rather than to use timed park. A rough estimate suffices.
     */
    static final long spinForTimeoutThreshold = 1000L;
101 |
|
102 |
    /**
     * Node class for LinkedTransferQueue. Opportunistically
     * subclasses from AtomicReference to represent item. Uses Object,
     * not E, to allow setting item to "this" after use, to avoid
     * garbage retention. Similarly, setting the next field to this is
     * used as sentinel that node is off list.
     */
    static final class Node<E> extends AtomicReference<Object> {
        // Successor in queue; self-linked (next == this) once off-list.
        volatile Node<E> next;
        // Thread parked waiting on this node, target of unpark on match.
        volatile Thread waiter; // to control park/unpark
        // true for data (producer) nodes, false for request (consumer) nodes.
        final boolean isData;

        /**
         * Creates a node whose AtomicReference value holds the item.
         *
         * @param item the data item, or null for a request node
         * @param isData true if this node carries data
         */
        Node(E item, boolean isData) {
            super(item);
            this.isData = isData;
        }

        // Unsafe mechanics

        private static final sun.misc.Unsafe UNSAFE = getUnsafe();
        private static final long nextOffset =
            objectFieldOffset(UNSAFE, "next", Node.class);

        /** CASes the next field from cmp to val. */
        final boolean casNext(Node<E> cmp, Node<E> val) {
            return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
        }

        /**
         * Self-links next, marking this node off-list and letting the
         * old successor be GCed; an ordered (lazy) write suffices
         * because the self-link is only a hint to later traversers.
         */
        final void clearNext() {
            UNSAFE.putOrderedObject(this, nextOffset, this);
        }

        /**
         * Returns a sun.misc.Unsafe.  Suitable for use in a 3rd party package.
         * Replace with a simple call to Unsafe.getUnsafe when integrating
         * into a jdk.
         *
         * @return a sun.misc.Unsafe
         */
        private static sun.misc.Unsafe getUnsafe() {
            try {
                return sun.misc.Unsafe.getUnsafe();
            } catch (SecurityException se) {
                try {
                    return java.security.AccessController.doPrivileged
                        (new java.security
                         .PrivilegedExceptionAction<sun.misc.Unsafe>() {
                             public sun.misc.Unsafe run() throws Exception {
                                 java.lang.reflect.Field f = sun.misc
                                     .Unsafe.class.getDeclaredField("theUnsafe");
                                 f.setAccessible(true);
                                 return (sun.misc.Unsafe) f.get(null);
                             }});
                } catch (java.security.PrivilegedActionException e) {
                    throw new RuntimeException("Could not initialize intrinsics",
                                               e.getCause());
                }
            }
        }

        private static final long serialVersionUID = -3375979862319811754L;
    }
163 |
|
164 |
    /**
     * Padded version of AtomicReference used for head, tail and
     * cleanMe, to alleviate contention across threads CASing one vs
     * the other (avoids false sharing of cache lines).
     */
    static final class PaddedAtomicReference<T> extends AtomicReference<T> {
        // enough padding for 64bytes with 4byte refs; fields are
        // intentionally unused -- they only occupy cache-line space
        Object p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pa, pb, pc, pd, pe;
        PaddedAtomicReference(T r) { super(r); }
        private static final long serialVersionUID = 8170090609809740854L;
    }
175 |
|
176 |
|
177 |
    /** head of the queue; never null after construction */
    private transient final PaddedAtomicReference<Node<E>> head;

    /** tail of the queue; never null after construction */
    private transient final PaddedAtomicReference<Node<E>> tail;

    /**
     * Reference to a cancelled node that might not yet have been
     * unlinked from queue because it was the last inserted node
     * when it cancelled.
     */
    private transient final PaddedAtomicReference<Node<E>> cleanMe;
189 |
|
190 |
    /**
     * Tries to cas nh as new head; if successful, unlink
     * old head's next node to avoid garbage retention.
     *
     * @param h the expected current head
     * @param nh the proposed new head
     * @return true if head was advanced by this call
     */
    private boolean advanceHead(Node<E> h, Node<E> nh) {
        // re-read head first so a stale h fails fast without a CAS
        if (h == head.get() && head.compareAndSet(h, nh)) {
            h.clearNext(); // forget old next
            return true;
        }
        return false;
    }
201 |
|
202 |
    /**
     * Puts or takes an item. Used for most queue operations (except
     * poll() and tryTransfer()). See the similar code in
     * SynchronousQueue for detailed explanation.
     *
     * @param e the item or if null, signifies that this is a take
     * @param mode the wait mode: NOWAIT, TIMEOUT, WAIT
     * @param nanos timeout in nanosecs, used only if mode is TIMEOUT
     * @return an item, or null on failure
     */
    private E xfer(E e, int mode, long nanos) {
        boolean isData = (e != null); // non-null item means this is a put
        Node<E> s = null;             // node to enqueue, created lazily
        final PaddedAtomicReference<Node<E>> head = this.head;
        final PaddedAtomicReference<Node<E>> tail = this.tail;

        for (;;) {
            Node<E> t = tail.get();
            Node<E> h = head.get();

            // Queue empty (t == h) or holds same-mode nodes: append a
            // new node and wait for a complementary op to match it.
            if (t != null && (t == h || t.isData == isData)) {
                if (s == null)
                    s = new Node<E>(e, isData);
                Node<E> last = t.next;
                if (last != null) {
                    // tail is lagging; help advance it, then retry
                    if (t == tail.get())
                        tail.compareAndSet(t, last);
                }
                else if (t.casNext(null, s)) {
                    // CAS failure is benign: another thread will fix tail
                    tail.compareAndSet(t, s);
                    return awaitFulfill(t, s, e, mode, nanos);
                }
            }

            // Queue holds complementary-mode nodes: match the first one.
            else if (h != null) {
                Node<E> first = h.next;
                if (t == tail.get() && first != null &&
                    advanceHead(h, first)) {
                    Object x = first.get();
                    // x == first means node was already matched/cancelled
                    if (x != first && first.compareAndSet(x, e)) {
                        LockSupport.unpark(first.waiter);
                        return isData ? e : (E) x;
                    }
                }
            }
        }
    }
249 |
|
250 |
|
251 |
    /**
     * Version of xfer for poll() and tryTransfer, which
     * simplifies control paths both here and in xfer.
     *
     * @param e the item to hand off, or null if this is a poll
     * @return the matched item, or null if none immediately available
     */
    private E fulfill(E e) {
        boolean isData = (e != null);
        final PaddedAtomicReference<Node<E>> head = this.head;
        final PaddedAtomicReference<Node<E>> tail = this.tail;

        for (;;) {
            Node<E> t = tail.get();
            Node<E> h = head.get();

            // Queue empty or all same mode: nothing can be fulfilled.
            if (t != null && (t == h || t.isData == isData)) {
                Node<E> last = t.next;
                if (t == tail.get()) {
                    if (last != null)
                        tail.compareAndSet(t, last); // help advance lagging tail
                    else
                        return null; // no complementary node; fail fast
                }
            }
            else if (h != null) {
                Node<E> first = h.next;
                if (t == tail.get() &&
                    first != null &&
                    advanceHead(h, first)) {
                    Object x = first.get();
                    // x == first means already matched/cancelled; retry
                    if (x != first && first.compareAndSet(x, e)) {
                        LockSupport.unpark(first.waiter);
                        return isData ? e : (E) x;
                    }
                }
            }
        }
    }
287 |
|
288 |
    /**
     * Spins/blocks until node s is fulfilled or caller gives up,
     * depending on wait mode.
     *
     * @param pred the predecessor of waiting node
     * @param s the waiting node
     * @param e the comparison value for checking match
     * @param mode mode (NOWAIT, TIMEOUT or WAIT)
     * @param nanos timeout value, used only when mode is TIMEOUT
     * @return matched item, or null if cancelled (interrupt/timeout)
     */
    private E awaitFulfill(Node<E> pred, Node<E> s, E e,
                           int mode, long nanos) {
        if (mode == NOWAIT)
            return null;

        long lastTime = (mode == TIMEOUT) ? System.nanoTime() : 0;
        Thread w = Thread.currentThread();
        int spins = -1; // set to desired spin count below
        for (;;) {
            if (w.isInterrupted())
                s.compareAndSet(e, s); // try to cancel: item -> self-link
            Object x = s.get();
            if (x != e) {                 // Node was matched or cancelled
                advanceHead(pred, s);     // unlink if head
                if (x == s) {             // was cancelled
                    clean(pred, s);
                    return null;
                }
                else if (x != null) {
                    s.set(s);             // avoid garbage retention
                    return (E) x;
                }
                else
                    return e;             // our data node was taken
            }
            if (mode == TIMEOUT) {
                long now = System.nanoTime();
                nanos -= now - lastTime;
                lastTime = now;
                if (nanos <= 0) {
                    s.compareAndSet(e, s); // try to cancel
                    continue;              // loop back to observe the result
                }
            }
            if (spins < 0) {
                Node<E> h = head.get(); // only spin if at head
                spins = ((h != null && h.next == s) ?
                         ((mode == TIMEOUT) ?
                          maxTimedSpins : maxUntimedSpins) : 0);
            }
            if (spins > 0)
                --spins;
            else if (s.waiter == null)
                s.waiter = w;          // publish waiter before parking
            else if (mode != TIMEOUT) {
                LockSupport.park(this);
                s.waiter = null;
                spins = -1;            // recompute spins after wakeup
            }
            else if (nanos > spinForTimeoutThreshold) {
                LockSupport.parkNanos(this, nanos);
                s.waiter = null;
                spins = -1;
            }
        }
    }
355 |
|
356 |
    /**
     * Returns validated tail for use in cleaning methods: a node
     * whose next field was observed null after helping to advance
     * lagging head and tail pointers.
     */
    private Node<E> getValidatedTail() {
        for (;;) {
            Node<E> h = head.get();
            Node<E> first = h.next;
            if (first != null && first.next == first) { // help advance
                advanceHead(h, first); // first is self-linked (off-list)
                continue;
            }
            Node<E> t = tail.get();
            Node<E> last = t.next;
            if (t == tail.get()) { // re-read to validate the snapshot
                if (last != null)
                    tail.compareAndSet(t, last); // help advance
                else
                    return t;
            }
        }
    }
377 |
|
378 |
    /**
     * Gets rid of cancelled node s with original predecessor pred.
     *
     * @param pred predecessor of cancelled node
     * @param s the cancelled node
     */
    private void clean(Node<E> pred, Node<E> s) {
        Thread w = s.waiter;
        if (w != null) {             // Wake up thread
            s.waiter = null;
            if (w != Thread.currentThread())
                LockSupport.unpark(w);
        }

        if (pred == null)
            return;

        /*
         * At any given time, exactly one node on list cannot be
         * deleted -- the last inserted node. To accommodate this, if
         * we cannot delete s, we save its predecessor as "cleanMe",
         * processing the previously saved version first. At least one
         * of node s or the node previously saved can always be
         * processed, so this always terminates.
         */
        while (pred.next == s) {
            Node<E> oldpred = reclean();  // First, help get rid of cleanMe
            Node<E> t = getValidatedTail();
            if (s != t) {               // If not tail, try to unsplice
                Node<E> sn = s.next;    // s.next == s means s already off list
                if (sn == s || pred.casNext(s, sn))
                    break;
            }
            else if (oldpred == pred || // Already saved
                     (oldpred == null && cleanMe.compareAndSet(null, pred)))
                break;                  // Postpone cleaning
        }
    }
416 |
|
417 |
    /**
     * Tries to unsplice the cancelled node held in cleanMe that was
     * previously uncleanable because it was at tail.
     *
     * @return current cleanMe node (or null)
     */
    private Node<E> reclean() {
        /*
         * cleanMe is, or at one time was, predecessor of cancelled
         * node s that was the tail so could not be unspliced.  If s
         * is no longer the tail, try to unsplice if necessary and
         * make cleanMe slot available.  This differs from similar
         * code in clean() because we must check that pred still
         * points to a cancelled node that must be unspliced -- if
         * not, we can (must) clear cleanMe without unsplicing.
         * This can loop only due to contention on casNext or
         * clearing cleanMe.
         */
        Node<E> pred;
        while ((pred = cleanMe.get()) != null) {
            Node<E> t = getValidatedTail();
            Node<E> s = pred.next;
            if (s != t) {
                Node<E> sn;
                // clear cleanMe if s is gone, not cancelled, already
                // off-list, or once we successfully unsplice it
                if (s == null || s == pred || s.get() != s ||
                    (sn = s.next) == s || pred.casNext(s, sn))
                    cleanMe.compareAndSet(pred, null);
            }
            else // s is still tail; cannot clean
                break;
        }
        return pred;
    }
450 |
|
451 |
/** |
452 |
* Creates an initially empty {@code LinkedTransferQueue}. |
453 |
*/ |
454 |
public LinkedTransferQueue() { |
455 |
Node<E> dummy = new Node<E>(null, false); |
456 |
head = new PaddedAtomicReference<Node<E>>(dummy); |
457 |
tail = new PaddedAtomicReference<Node<E>>(dummy); |
458 |
cleanMe = new PaddedAtomicReference<Node<E>>(null); |
459 |
} |
460 |
|
461 |
    /**
     * Creates a {@code LinkedTransferQueue}
     * initially containing the elements of the given collection,
     * added in traversal order of the collection's iterator.
     *
     * @param c the collection of elements to initially contain
     * @throws NullPointerException if the specified collection or any
     *         of its elements are null
     */
    public LinkedTransferQueue(Collection<? extends E> c) {
        this();     // set up empty queue first
        addAll(c);  // inherited AbstractQueue.addAll, via add/offer
    }
474 |
|
475 |
    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never block.
     *
     * @param e the element to add
     * @throws NullPointerException if the specified element is null
     */
    public void put(E e) {
        offer(e); // never blocks: unbounded queue
    }
484 |
|
485 |
    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never block or
     * return {@code false}.
     *
     * @param e the element to add
     * @param timeout ignored: the queue never needs to wait for space
     * @param unit ignored
     * @return {@code true} (as specified by
     *  {@link BlockingQueue#offer(Object,long,TimeUnit) BlockingQueue.offer})
     * @throws NullPointerException if the specified element is null
     */
    public boolean offer(E e, long timeout, TimeUnit unit) {
        return offer(e); // timeout irrelevant; insertion always succeeds
    }
497 |
|
498 |
    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never return {@code false}.
     *
     * @param e the element to add
     * @return {@code true} (as specified by
     *         {@link BlockingQueue#offer(Object) BlockingQueue.offer})
     * @throws NullPointerException if the specified element is null
     */
    public boolean offer(E e) {
        if (e == null) throw new NullPointerException();
        xfer(e, NOWAIT, 0); // enqueue or hand directly to a waiting consumer
        return true;
    }
511 |
|
512 |
    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never throw
     * {@link IllegalStateException} or return {@code false}.
     *
     * @param e the element to add
     * @return {@code true} (as specified by {@link Collection#add})
     * @throws NullPointerException if the specified element is null
     */
    public boolean add(E e) {
        return offer(e); // capacity is unbounded, so add == offer
    }
523 |
|
524 |
    /**
     * Transfers the specified element immediately if there exists a
     * consumer already waiting to receive it (in {@link #take} or
     * timed {@link #poll(long,TimeUnit) poll}), otherwise
     * returning {@code false} without enqueuing the element.
     *
     * @param e the element to transfer
     * @return {@code true} if a waiting consumer received the element
     * @throws NullPointerException if the specified element is null
     */
    public boolean tryTransfer(E e) {
        if (e == null) throw new NullPointerException();
        // fulfill returns null when no complementary (request) node exists
        return fulfill(e) != null;
    }
536 |
|
537 |
/** |
538 |
* Inserts the specified element at the tail of this queue, |
539 |
* waiting if necessary for the element to be received by a |
540 |
* consumer invoking {@code take} or {@code poll}. |
541 |
* |
542 |
* @throws NullPointerException if the specified element is null |
543 |
*/ |
544 |
public void transfer(E e) throws InterruptedException { |
545 |
if (e == null) throw new NullPointerException(); |
546 |
if (xfer(e, WAIT, 0) == null) { |
547 |
Thread.interrupted(); |
548 |
throw new InterruptedException(); |
549 |
} |
550 |
} |
551 |
|
552 |
/** |
553 |
* Inserts the specified element at the tail of this queue, |
554 |
* waiting up to the specified wait time if necessary for the |
555 |
* element to be received by a consumer invoking {@code take} or |
556 |
* {@code poll}. |
557 |
* |
558 |
* @throws NullPointerException if the specified element is null |
559 |
*/ |
560 |
public boolean tryTransfer(E e, long timeout, TimeUnit unit) |
561 |
throws InterruptedException { |
562 |
if (e == null) throw new NullPointerException(); |
563 |
if (xfer(e, TIMEOUT, unit.toNanos(timeout)) != null) |
564 |
return true; |
565 |
if (!Thread.interrupted()) |
566 |
return false; |
567 |
throw new InterruptedException(); |
568 |
} |
569 |
|
570 |
public E take() throws InterruptedException { |
571 |
E e = xfer(null, WAIT, 0); |
572 |
if (e != null) |
573 |
return e; |
574 |
Thread.interrupted(); |
575 |
throw new InterruptedException(); |
576 |
} |
577 |
|
578 |
public E poll(long timeout, TimeUnit unit) throws InterruptedException { |
579 |
E e = xfer(null, TIMEOUT, unit.toNanos(timeout)); |
580 |
if (e != null || !Thread.interrupted()) |
581 |
return e; |
582 |
throw new InterruptedException(); |
583 |
} |
584 |
|
585 |
    /**
     * Retrieves and removes the head of this queue, or returns
     * {@code null} if this queue is empty (never blocks).
     *
     * @return the head of this queue, or {@code null} if empty
     */
    public E poll() {
        return fulfill(null); // null item signifies a take/poll request
    }
588 |
|
589 |
/** |
590 |
* @throws NullPointerException {@inheritDoc} |
591 |
* @throws IllegalArgumentException {@inheritDoc} |
592 |
*/ |
593 |
public int drainTo(Collection<? super E> c) { |
594 |
if (c == null) |
595 |
throw new NullPointerException(); |
596 |
if (c == this) |
597 |
throw new IllegalArgumentException(); |
598 |
int n = 0; |
599 |
E e; |
600 |
while ( (e = poll()) != null) { |
601 |
c.add(e); |
602 |
++n; |
603 |
} |
604 |
return n; |
605 |
} |
606 |
|
607 |
/** |
608 |
* @throws NullPointerException {@inheritDoc} |
609 |
* @throws IllegalArgumentException {@inheritDoc} |
610 |
*/ |
611 |
public int drainTo(Collection<? super E> c, int maxElements) { |
612 |
if (c == null) |
613 |
throw new NullPointerException(); |
614 |
if (c == this) |
615 |
throw new IllegalArgumentException(); |
616 |
int n = 0; |
617 |
E e; |
618 |
while (n < maxElements && (e = poll()) != null) { |
619 |
c.add(e); |
620 |
++n; |
621 |
} |
622 |
return n; |
623 |
} |
624 |
|
625 |
// Traversal-based methods |
626 |
|
627 |
    /**
     * Returns head after performing any outstanding helping steps
     * (advancing a lagging tail, unlinking matched/cancelled nodes
     * at the front, and recleaning the cleanMe slot).
     */
    private Node<E> traversalHead() {
        for (;;) {
            Node<E> t = tail.get();
            Node<E> h = head.get();
            if (h != null && t != null) {
                Node<E> last = t.next;
                Node<E> first = h.next;
                if (t == tail.get()) { // validate snapshot
                    if (last != null)
                        tail.compareAndSet(t, last); // help advance tail
                    else if (first != null) {
                        Object x = first.get();
                        if (x == first)
                            advanceHead(h, first); // first cancelled; skip it
                        else
                            return h;
                    }
                    else
                        return h; // empty queue
                }
            }
            reclean(); // also help clear the postponed-clean slot
        }
    }
654 |
|
655 |
    /**
     * Returns an iterator over the elements in this queue in proper
     * sequence, from head to tail.
     *
     * <p>The returned iterator is a "weakly consistent" iterator that
     * will never throw
     * {@link ConcurrentModificationException ConcurrentModificationException},
     * and guarantees to traverse elements as they existed upon
     * construction of the iterator, and may (but is not guaranteed
     * to) reflect any modifications subsequent to construction.
     *
     * @return an iterator over the elements in this queue in proper sequence
     */
    public Iterator<E> iterator() {
        return new Itr();
    }
671 |
|
672 |
    /**
     * Iterators. Basic strategy is to traverse list, treating
     * non-data (i.e., request) nodes as terminating list.
     * Once a valid data node is found, the item is cached
     * so that the next call to next() will return it even
     * if subsequently removed.
     */
    class Itr implements Iterator<E> {
        Node<E> next;        // node to return next
        Node<E> pnext;       // predecessor of next
        Node<E> curr;        // last returned node, for remove()
        Node<E> pcurr;       // predecessor of curr, for remove()
        E nextItem;          // Cache of next item, once committed to in next

        Itr() {
            advance(); // prime the first element; discard the null result
        }

        /**
         * Moves to next valid node and returns item to return for
         * next(), or null if no such.
         */
        private E advance() {
            // commit the previously found node as "current"
            pcurr = pnext;
            curr = next;
            E item = nextItem;

            for (;;) {
                pnext = (next == null) ? traversalHead() : next;
                next = pnext.next;
                if (next == pnext) { // self-linked: pnext fell off list
                    next = null;
                    continue;  // restart
                }
                if (next == null)
                    break; // end of list
                Object x = next.get();
                if (x != null && x != next) { // live data node
                    nextItem = (E) x;
                    break;
                }
                // else matched/cancelled node: keep walking
            }
            return item;
        }

        public boolean hasNext() {
            return next != null;
        }

        public E next() {
            if (next == null)
                throw new NoSuchElementException();
            return advance();
        }

        public void remove() {
            Node<E> p = curr;
            if (p == null)
                throw new IllegalStateException();
            Object x = p.get();
            // Cancel the node by CASing item to self, then unsplice;
            // no-op if the node was already matched or removed.
            if (x != null && x != p && p.compareAndSet(x, p))
                clean(pcurr, p);
        }
    }
736 |
|
737 |
    /**
     * Retrieves, but does not remove, the head of this queue, or
     * returns {@code null} if this queue has no data elements.
     *
     * @return the head of this queue, or {@code null} if empty
     */
    public E peek() {
        for (;;) {
            Node<E> h = traversalHead();
            Node<E> p = h.next;
            if (p == null)
                return null;
            Object x = p.get();
            if (p != x) { // not cancelled
                if (!p.isData)
                    return null; // request node at front: no data present
                if (x != null)
                    return (E) x;
                // x == null: data node already taken; retry
            }
        }
    }
752 |
|
753 |
    /**
     * Returns {@code true} if this queue contains no data elements.
     * (A queue holding only waiting consumers counts as empty.)
     *
     * @return {@code true} if this queue contains no data elements
     */
    public boolean isEmpty() {
        for (;;) {
            Node<E> h = traversalHead();
            Node<E> p = h.next;
            if (p == null)
                return true;
            Object x = p.get();
            if (p != x) { // not cancelled
                if (!p.isData)
                    return true; // only request nodes: no data
                if (x != null)
                    return false;
                // x == null: node matched concurrently; retry
            }
        }
    }
768 |
|
769 |
    /**
     * Returns {@code true} if there is at least one consumer waiting
     * to receive an element via {@link #take} or timed poll.
     *
     * @return {@code true} if a consumer is waiting
     */
    public boolean hasWaitingConsumer() {
        for (;;) {
            Node<E> h = traversalHead();
            Node<E> p = h.next;
            if (p == null)
                return false;
            Object x = p.get();
            if (p != x) // not cancelled: front node's mode answers the question
                return !p.isData;
        }
    }
780 |
|
781 |
    /**
     * Returns the number of elements in this queue.  If this queue
     * contains more than {@code Integer.MAX_VALUE} elements, returns
     * {@code Integer.MAX_VALUE}.
     *
     * <p>Beware that, unlike in most collections, this method is
     * <em>NOT</em> a constant-time operation. Because of the
     * asynchronous nature of these queues, determining the current
     * number of elements requires an O(n) traversal.
     *
     * @return the number of elements in this queue
     */
    public int size() {
        for (;;) {
            int count = 0;
            Node<E> pred = traversalHead();
            for (;;) {
                Node<E> q = pred.next;
                if (q == pred) // self-link: fell off list; restart
                    break;
                if (q == null || !q.isData)
                    return count; // end of data nodes
                Object x = q.get();
                if (x != null && x != q) { // live data node
                    if (++count == Integer.MAX_VALUE) // saturated
                        return count;
                }
                pred = q;
            }
        }
    }
812 |
|
813 |
    /**
     * Returns an estimate of the number of consumers waiting to
     * receive elements via {@link #take} or timed poll.
     *
     * @return the number of waiting consumers
     */
    public int getWaitingConsumerCount() {
        // converse of size -- count valid non-data nodes
        for (;;) {
            int count = 0;
            Node<E> pred = traversalHead();
            for (;;) {
                Node<E> q = pred.next;
                if (q == pred) // self-link: fell off list; restart
                    break;
                if (q == null || q.isData)
                    return count; // end of request nodes
                Object x = q.get();
                if (x == null) { // unfulfilled request node
                    if (++count == Integer.MAX_VALUE) // saturated
                        return count;
                }
                pred = q;
            }
        }
    }
833 |
|
834 |
    /**
     * Removes a single instance of the specified element from this
     * queue, if it is present, by cancelling its node and unsplicing it.
     *
     * @param o element to be removed from this queue, if present
     * @return {@code true} if this queue changed as a result of the call
     */
    public boolean remove(Object o) {
        if (o == null)
            return false;
        for (;;) {
            Node<E> pred = traversalHead();
            for (;;) {
                Node<E> q = pred.next;
                if (q == pred) // self-link: fell off list; restart
                    break;
                if (q == null || !q.isData)
                    return false; // no more data nodes
                Object x = q.get();
                // cancel node by CASing item to self, then unsplice
                if (x != null && x != q && o.equals(x) &&
                    q.compareAndSet(x, q)) {
                    clean(pred, q);
                    return true;
                }
                pred = q;
            }
        }
    }
855 |
|
856 |
    /**
     * Always returns {@code Integer.MAX_VALUE} because a
     * {@code LinkedTransferQueue} is not capacity constrained.
     *
     * @return {@code Integer.MAX_VALUE} (as specified by
     *         {@link BlockingQueue#remainingCapacity()})
     */
    public int remainingCapacity() {
        return Integer.MAX_VALUE;
    }
866 |
|
867 |
    /**
     * Save the state to a stream (that is, serialize it).
     *
     * @serialData All of the elements (each an {@code E}) in
     * the proper order, followed by a null
     * @param s the stream
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        s.defaultWriteObject();
        // iterator gives a weakly-consistent snapshot of data elements
        for (E e : this)
            s.writeObject(e);
        // Use trailing null as sentinel
        s.writeObject(null);
    }
882 |
|
883 |
    /**
     * Reconstitute the Queue instance from a stream (that is,
     * deserialize it).
     *
     * @param s the stream
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        s.defaultReadObject();
        // head/tail/cleanMe are transient; rebuild them before offering
        resetHeadAndTail();
        for (;;) {
            @SuppressWarnings("unchecked") E item = (E) s.readObject();
            if (item == null)
                break; // trailing null sentinel written by writeObject
            else
                offer(item);
        }
    }
901 |
|
902 |
    // Support for resetting head/tail while deserializing.
    // Uses Unsafe volatile writes because the fields are final and
    // cannot be reassigned outside a constructor.
    private void resetHeadAndTail() {
        Node<E> dummy = new Node<E>(null, false);
        UNSAFE.putObjectVolatile(this, headOffset,
                                 new PaddedAtomicReference<Node<E>>(dummy));
        UNSAFE.putObjectVolatile(this, tailOffset,
                                 new PaddedAtomicReference<Node<E>>(dummy));
        UNSAFE.putObjectVolatile(this, cleanMeOffset,
                                 new PaddedAtomicReference<Node<E>>(null));
    }
912 |
|
913 |
    // Unsafe mechanics: raw field offsets for the volatile writes
    // performed in resetHeadAndTail during deserialization.

    private static final sun.misc.Unsafe UNSAFE = getUnsafe();
    private static final long headOffset =
        objectFieldOffset(UNSAFE, "head", LinkedTransferQueue.class);
    private static final long tailOffset =
        objectFieldOffset(UNSAFE, "tail", LinkedTransferQueue.class);
    private static final long cleanMeOffset =
        objectFieldOffset(UNSAFE, "cleanMe", LinkedTransferQueue.class);
922 |
|
923 |
|
924 |
    /**
     * Returns the Unsafe field offset for the named declared field
     * of the given class.
     *
     * @param UNSAFE the Unsafe instance to query
     * @param field the declared field name
     * @param klazz the declaring class
     * @throws NoSuchFieldError if the field does not exist
     */
    static long objectFieldOffset(sun.misc.Unsafe UNSAFE,
                                  String field, Class<?> klazz) {
        try {
            return UNSAFE.objectFieldOffset(klazz.getDeclaredField(field));
        } catch (NoSuchFieldException e) {
            // Convert Exception to corresponding Error
            NoSuchFieldError error = new NoSuchFieldError(field);
            error.initCause(e);
            throw error;
        }
    }
935 |
|
936 |
    /**
     * Returns a sun.misc.Unsafe.  Suitable for use in a 3rd party package.
     * Replace with a simple call to Unsafe.getUnsafe when integrating
     * into a jdk.
     *
     * @return a sun.misc.Unsafe
     */
    private static sun.misc.Unsafe getUnsafe() {
        try {
            return sun.misc.Unsafe.getUnsafe();
        } catch (SecurityException se) {
            // Direct access denied (not on boot classpath); fall back
            // to reflectively reading the singleton field.
            try {
                return java.security.AccessController.doPrivileged
                    (new java.security
                     .PrivilegedExceptionAction<sun.misc.Unsafe>() {
                         public sun.misc.Unsafe run() throws Exception {
                             java.lang.reflect.Field f = sun.misc
                                 .Unsafe.class.getDeclaredField("theUnsafe");
                             f.setAccessible(true);
                             return (sun.misc.Unsafe) f.get(null);
                         }});
            } catch (java.security.PrivilegedActionException e) {
                throw new RuntimeException("Could not initialize intrinsics",
                                           e.getCause());
            }
        }
    }
963 |
} |