1 |
/* |
2 |
* Written by Doug Lea with assistance from members of JCP JSR-166 |
3 |
* Expert Group and released to the public domain, as explained at |
4 |
* http://creativecommons.org/licenses/publicdomain |
5 |
*/ |
6 |
|
7 |
package jsr166y; |
8 |
import java.util.concurrent.*; |
9 |
import java.util.concurrent.locks.*; |
10 |
import java.util.concurrent.atomic.*; |
11 |
import java.util.*; |
12 |
import java.io.*; |
13 |
import sun.misc.Unsafe; |
14 |
import java.lang.reflect.*; |
15 |
|
16 |
/** |
17 |
* An unbounded {@linkplain TransferQueue} based on linked nodes. |
18 |
* This queue orders elements FIFO (first-in-first-out) with respect |
19 |
* to any given producer. The <em>head</em> of the queue is that |
20 |
* element that has been on the queue the longest time for some |
21 |
* producer. The <em>tail</em> of the queue is that element that has |
22 |
* been on the queue the shortest time for some producer. |
23 |
* |
24 |
* <p>Beware that, unlike in most collections, the {@code size} |
25 |
* method is <em>NOT</em> a constant-time operation. Because of the |
26 |
* asynchronous nature of these queues, determining the current number |
27 |
* of elements requires a traversal of the elements. |
28 |
* |
29 |
* <p>This class and its iterator implement all of the |
30 |
* <em>optional</em> methods of the {@link Collection} and {@link |
31 |
* Iterator} interfaces. |
32 |
* |
33 |
* <p>Memory consistency effects: As with other concurrent |
34 |
* collections, actions in a thread prior to placing an object into a |
35 |
* {@code LinkedTransferQueue} |
36 |
* <a href="package-summary.html#MemoryVisibility"><i>happen-before</i></a> |
37 |
* actions subsequent to the access or removal of that element from |
38 |
* the {@code LinkedTransferQueue} in another thread. |
39 |
* |
40 |
* <p>This class is a member of the |
41 |
* <a href="{@docRoot}/../technotes/guides/collections/index.html"> |
42 |
* Java Collections Framework</a>. |
43 |
* |
44 |
* @since 1.7 |
45 |
* @author Doug Lea |
46 |
* @param <E> the type of elements held in this collection |
47 |
* |
48 |
*/ |
49 |
public class LinkedTransferQueue<E> extends AbstractQueue<E> |
50 |
implements TransferQueue<E>, java.io.Serializable { |
51 |
private static final long serialVersionUID = -3223113410248163686L; |
52 |
|
53 |
    /*
     * This class extends the approach used in FIFO-mode
     * SynchronousQueues. See the internal documentation, as well as
     * the PPoPP 2006 paper "Scalable Synchronous Queues" by Scherer,
     * Lea & Scott
     * (http://www.cs.rice.edu/~wns1/papers/2006-PPoPP-SQ.pdf)
     *
     * The main extension is to provide different Wait modes for the
     * main "xfer" method that puts or takes items.  These don't
     * impact the basic dual-queue logic, but instead control whether
     * or how threads block upon insertion of request or data nodes
     * into the dual queue. It also uses slightly different
     * conventions for tracking whether nodes are off-list or
     * cancelled.
     */

    // Wait modes for xfer method: how long a caller is willing to
    // wait for a complementary operation to appear.
    static final int NOWAIT  = 0; // give up (return null) if not matched at once
    static final int TIMEOUT = 1; // wait up to a nanosecond timeout
    static final int WAIT    = 2; // wait until matched or cancelled

    /** The number of CPUs, for spin control */
    static final int NCPUS = Runtime.getRuntime().availableProcessors();

    /**
     * The number of times to spin before blocking in timed waits.
     * The value is empirically derived -- it works well across a
     * variety of processors and OSes. Empirically, the best value
     * seems not to vary with number of CPUs (beyond 2) so is just
     * a constant.
     */
    static final int maxTimedSpins = (NCPUS < 2)? 0 : 32;

    /**
     * The number of times to spin before blocking in untimed waits.
     * This is greater than timed value because untimed waits spin
     * faster since they don't need to check times on each spin.
     */
    static final int maxUntimedSpins = maxTimedSpins * 16;

    /**
     * The number of nanoseconds for which it is faster to spin
     * rather than to use timed park. A rough estimate suffices.
     */
    static final long spinForTimeoutThreshold = 1000L;
98 |
|
99 |
    /**
     * Node class for LinkedTransferQueue. Opportunistically
     * subclasses from AtomicReference to represent item. Uses Object,
     * not E, to allow setting item to "this" after use, to avoid
     * garbage retention. Similarly, setting the next field to this is
     * used as sentinel that node is off list.
     */
    static final class QNode extends AtomicReference<Object> {
        volatile QNode next;    // successor node; == this once off-list
        volatile Thread waiter; // to control park/unpark
        final boolean isData;   // true for data (put) nodes, false for requests (takes)

        /** Creates a node holding item (null for a request node). */
        QNode(Object item, boolean isData) {
            super(item);        // the inherited AtomicReference value IS the item
            this.isData = isData;
        }

        // Updater enabling CAS on the volatile next field without
        // allocating a separate AtomicReference per node.
        static final AtomicReferenceFieldUpdater<QNode, QNode>
            nextUpdater = AtomicReferenceFieldUpdater.newUpdater
            (QNode.class, QNode.class, "next");

        /** Atomically sets next to val if it is currently cmp. */
        boolean casNext(QNode cmp, QNode val) {
            return nextUpdater.compareAndSet(this, cmp, val);
        }
    }
123 |
|
124 |
    /**
     * Padded version of AtomicReference used for head, tail and
     * cleanMe, to alleviate contention across threads CASing one vs
     * the other (avoids false sharing of cache lines).
     */
    static final class PaddedAtomicReference<T> extends AtomicReference<T> {
        // enough padding for 64bytes with 4byte refs; these fields are
        // never read or written -- they exist only to space out the
        // inherited value field
        Object p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pa, pb, pc, pd, pe;
        PaddedAtomicReference(T r) { super(r); }
    }
134 |
|
135 |
|
136 |
    /** head of the queue; always non-null after construction (holds a dummy initially) */
    private transient final PaddedAtomicReference<QNode> head;

    /** tail of the queue; may lag behind the true last node and is helped forward */
    private transient final PaddedAtomicReference<QNode> tail;

    /**
     * Reference to a cancelled node that might not yet have been
     * unlinked from queue because it was the last inserted node
     * when it cancelled.
     */
    private transient final PaddedAtomicReference<QNode> cleanMe;
147 |
|
148 |
    /**
     * Tries to cas nh as new head; if successful, unlink
     * old head's next node to avoid garbage retention.
     *
     * @param h the node expected to be the current head
     * @param nh the node to install as new head
     * @return true if the head CAS succeeded
     */
    private boolean advanceHead(QNode h, QNode nh) {
        if (h == head.get() && head.compareAndSet(h, nh)) {
            h.next = h; // forget old next; self-link marks h as off-list
            return true;
        }
        return false;
    }
159 |
|
160 |
    /**
     * Puts or takes an item. Used for most queue operations (except
     * poll() and tryTransfer()). See the similar code in
     * SynchronousQueue for detailed explanation.
     *
     * @param e the item or if null, signifies that this is a take
     * @param mode the wait mode: NOWAIT, TIMEOUT, WAIT
     * @param nanos timeout in nanosecs, used only if mode is TIMEOUT
     * @return an item, or null on failure
     */
    private Object xfer(Object e, int mode, long nanos) {
        boolean isData = (e != null);   // data (put) vs request (take)
        QNode s = null;                 // the node to append, created lazily
        final PaddedAtomicReference<QNode> head = this.head;
        final PaddedAtomicReference<QNode> tail = this.tail;

        for (;;) {
            QNode t = tail.get();
            QNode h = head.get();

            // Queue is empty or holds nodes of the same mode:
            // append s and wait for a fulfiller.
            if (t != null && (t == h || t.isData == isData)) {
                if (s == null)
                    s = new QNode(e, isData);
                QNode last = t.next;
                if (last != null) {             // tail is lagging
                    if (t == tail.get())
                        tail.compareAndSet(t, last); // help swing it forward
                }
                else if (t.casNext(null, s)) {  // appended s after t
                    tail.compareAndSet(t, s);   // ok if this CAS fails; others help
                    return awaitFulfill(t, s, e, mode, nanos);
                }
            }

            // Complementary node at the head: try to fulfill it.
            else if (h != null) {
                QNode first = h.next;
                if (t == tail.get() && first != null &&
                    advanceHead(h, first)) {    // we dequeued first
                    Object x = first.get();
                    // x == first means first was already cancelled/off-list
                    if (x != first && first.compareAndSet(x, e)) {
                        LockSupport.unpark(first.waiter);
                        return isData? e : x;   // put returns e; take returns the item
                    }
                }
            }
        }
    }
206 |
|
207 |
|
208 |
    /**
     * Version of xfer for poll() and tryTransfer, which
     * simplifies control paths both here and in xfer.
     * Unlike xfer, this never appends a node: when no complementary
     * node is available it returns null instead of waiting.
     *
     * @param e the item, or null to signify a take
     * @return the matched item (or e on a fulfilled transfer), or
     *         null if nothing could be fulfilled immediately
     */
    private Object fulfill(Object e) {
        boolean isData = (e != null);
        final PaddedAtomicReference<QNode> head = this.head;
        final PaddedAtomicReference<QNode> tail = this.tail;

        for (;;) {
            QNode t = tail.get();
            QNode h = head.get();

            // Queue empty or holds same-mode nodes: nothing to match.
            if (t != null && (t == h || t.isData == isData)) {
                QNode last = t.next;
                if (t == tail.get()) {          // reads were consistent
                    if (last != null)
                        tail.compareAndSet(t, last); // help advance lagging tail
                    else
                        return null;            // genuinely nothing to fulfill
                }
            }
            else if (h != null) {
                QNode first = h.next;
                if (t == tail.get() &&
                    first != null &&
                    advanceHead(h, first)) {    // we dequeued first
                    Object x = first.get();
                    // x == first means first was already cancelled/off-list
                    if (x != first && first.compareAndSet(x, e)) {
                        LockSupport.unpark(first.waiter);
                        return isData? e : x;
                    }
                }
            }
        }
    }
244 |
|
245 |
    /**
     * Spins/blocks until node s is fulfilled or caller gives up,
     * depending on wait mode.
     *
     * @param pred the predecessor of waiting node
     * @param s the waiting node
     * @param e the comparison value for checking match
     * @param mode mode (NOWAIT, TIMEOUT, or WAIT)
     * @param nanos timeout value, used only in TIMEOUT mode
     * @return matched item, or null if cancelled (interrupt/timeout)
     */
    private Object awaitFulfill(QNode pred, QNode s, Object e,
                                int mode, long nanos) {
        if (mode == NOWAIT)
            return null;

        long lastTime = (mode == TIMEOUT)? System.nanoTime() : 0;
        Thread w = Thread.currentThread();
        int spins = -1; // set to desired spin count below
        for (;;) {
            if (w.isInterrupted())
                s.compareAndSet(e, s);      // try to cancel: item -> self
            Object x = s.get();
            if (x != e) {                   // Node was matched or cancelled
                advanceHead(pred, s);       // unlink if head
                if (x == s) {               // was cancelled
                    clean(pred, s);
                    return null;
                }
                else if (x != null) {       // matched with an item (we were taking)
                    s.set(s);               // mark off-list; avoid garbage retention
                    return x;
                }
                else                        // matched by a taker (we were putting)
                    return e;
            }
            if (mode == TIMEOUT) {
                long now = System.nanoTime();
                nanos -= now - lastTime;
                lastTime = now;
                if (nanos <= 0) {
                    s.compareAndSet(e, s);  // try to cancel
                    continue;               // re-read x to dispatch cancellation above
                }
            }
            if (spins < 0) {
                QNode h = head.get();       // only spin if at head
                spins = ((h != null && h.next == s) ?
                         (mode == TIMEOUT?
                          maxTimedSpins : maxUntimedSpins) : 0);
            }
            if (spins > 0)
                --spins;
            else if (s.waiter == null)      // publish waiter before parking
                s.waiter = w;
            else if (mode != TIMEOUT) {
                LockSupport.park(this);
                s.waiter = null;
                spins = -1;                 // recompute spins after wakeup
            }
            else if (nanos > spinForTimeoutThreshold) {
                LockSupport.parkNanos(this, nanos);
                s.waiter = null;
                spins = -1;
            }
        }
    }
312 |
|
313 |
    /**
     * Returns validated tail for use in cleaning methods: a node
     * observed with next == null after helping to advance any
     * lagging head/tail pointers.
     */
    private QNode getValidatedTail() {
        for (;;) {
            QNode h = head.get();
            QNode first = h.next;
            if (first != null && first.next == first) { // first is off-list
                advanceHead(h, first);                  // help advance head past it
                continue;
            }
            QNode t = tail.get();
            QNode last = t.next;
            if (t == tail.get()) {          // reads were consistent
                if (last != null)
                    tail.compareAndSet(t, last); // help advance lagging tail
                else
                    return t;               // t currently has no successor
            }
        }
    }
334 |
|
335 |
    /**
     * Gets rid of cancelled node s with original predecessor pred.
     *
     * @param pred predecessor of cancelled node
     * @param s the cancelled node
     */
    private void clean(QNode pred, QNode s) {
        Thread w = s.waiter;
        if (w != null) {             // Wake up thread parked on s
            s.waiter = null;
            if (w != Thread.currentThread())
                LockSupport.unpark(w);
        }
        /*
         * At any given time, exactly one node on list cannot be
         * deleted -- the last inserted node. To accommodate this, if
         * we cannot delete s, we save its predecessor as "cleanMe",
         * processing the previously saved version first. At least one
         * of node s or the node previously saved can always be
         * processed, so this always terminates.
         */
        while (pred.next == s) {        // loop while s still appears linked
            QNode oldpred = reclean();  // First, help get rid of cleanMe
            QNode t = getValidatedTail();
            if (s != t) {               // If not tail, try to unsplice
                QNode sn = s.next;      // s.next == s means s already off list
                if (sn == s || pred.casNext(s, sn))
                    break;
            }
            else if (oldpred == pred || // Already saved
                     (oldpred == null && cleanMe.compareAndSet(null, pred)))
                break;                  // Postpone cleaning s until it is not tail
        }
    }
368 |
|
369 |
    /**
     * Tries to unsplice the cancelled node held in cleanMe that was
     * previously uncleanable because it was at tail.
     *
     * @return current cleanMe node (or null)
     */
    private QNode reclean() {
        /*
         * cleanMe is, or at one time was, predecessor of cancelled
         * node s that was the tail so could not be unspliced.  If s
         * is no longer the tail, try to unsplice if necessary and
         * make cleanMe slot available.  This differs from similar
         * code in clean() because we must check that pred still
         * points to a cancelled node that must be unspliced -- if
         * not, we can (must) clear cleanMe without unsplicing.
         * This can loop only due to contention on casNext or
         * clearing cleanMe.
         */
        QNode pred;
        while ((pred = cleanMe.get()) != null) {
            QNode t = getValidatedTail();
            QNode s = pred.next;
            if (s != t) {
                QNode sn;
                // Clear cleanMe if s no longer needs unsplicing
                // (gone, self-linked, or not cancelled) or once the
                // unsplice CAS succeeds.
                if (s == null || s == pred || s.get() != s ||
                    (sn = s.next) == s || pred.casNext(s, sn))
                    cleanMe.compareAndSet(pred, null);
            }
            else // s is still tail; cannot clean
                break;
        }
        return pred;
    }
401 |
|
402 |
/** |
403 |
* Creates an initially empty {@code LinkedTransferQueue}. |
404 |
*/ |
405 |
public LinkedTransferQueue() { |
406 |
QNode dummy = new QNode(null, false); |
407 |
head = new PaddedAtomicReference<QNode>(dummy); |
408 |
tail = new PaddedAtomicReference<QNode>(dummy); |
409 |
cleanMe = new PaddedAtomicReference<QNode>(null); |
410 |
} |
411 |
|
412 |
    /**
     * Creates a {@code LinkedTransferQueue}
     * initially containing the elements of the given collection,
     * added in traversal order of the collection's iterator.
     *
     * @param c the collection of elements to initially contain
     * @throws NullPointerException if the specified collection or any
     *         of its elements are null
     */
    public LinkedTransferQueue(Collection<? extends E> c) {
        this();     // build empty queue around a dummy node
        addAll(c);  // inherited from AbstractQueue; offers each element
    }
424 |
|
425 |
public void put(E e) throws InterruptedException { |
426 |
if (e == null) throw new NullPointerException(); |
427 |
if (Thread.interrupted()) throw new InterruptedException(); |
428 |
xfer(e, NOWAIT, 0); |
429 |
} |
430 |
|
431 |
public boolean offer(E e, long timeout, TimeUnit unit) |
432 |
throws InterruptedException { |
433 |
if (e == null) throw new NullPointerException(); |
434 |
if (Thread.interrupted()) throw new InterruptedException(); |
435 |
xfer(e, NOWAIT, 0); |
436 |
return true; |
437 |
} |
438 |
|
439 |
public boolean offer(E e) { |
440 |
if (e == null) throw new NullPointerException(); |
441 |
xfer(e, NOWAIT, 0); |
442 |
return true; |
443 |
} |
444 |
|
445 |
public void transfer(E e) throws InterruptedException { |
446 |
if (e == null) throw new NullPointerException(); |
447 |
if (xfer(e, WAIT, 0) == null) { |
448 |
Thread.interrupted(); |
449 |
throw new InterruptedException(); |
450 |
} |
451 |
} |
452 |
|
453 |
public boolean tryTransfer(E e, long timeout, TimeUnit unit) |
454 |
throws InterruptedException { |
455 |
if (e == null) throw new NullPointerException(); |
456 |
if (xfer(e, TIMEOUT, unit.toNanos(timeout)) != null) |
457 |
return true; |
458 |
if (!Thread.interrupted()) |
459 |
return false; |
460 |
throw new InterruptedException(); |
461 |
} |
462 |
|
463 |
public boolean tryTransfer(E e) { |
464 |
if (e == null) throw new NullPointerException(); |
465 |
return fulfill(e) != null; |
466 |
} |
467 |
|
468 |
public E take() throws InterruptedException { |
469 |
Object e = xfer(null, WAIT, 0); |
470 |
if (e != null) |
471 |
return (E)e; |
472 |
Thread.interrupted(); |
473 |
throw new InterruptedException(); |
474 |
} |
475 |
|
476 |
public E poll(long timeout, TimeUnit unit) throws InterruptedException { |
477 |
Object e = xfer(null, TIMEOUT, unit.toNanos(timeout)); |
478 |
if (e != null || !Thread.interrupted()) |
479 |
return (E)e; |
480 |
throw new InterruptedException(); |
481 |
} |
482 |
|
483 |
public E poll() { |
484 |
return (E)fulfill(null); |
485 |
} |
486 |
|
487 |
public int drainTo(Collection<? super E> c) { |
488 |
if (c == null) |
489 |
throw new NullPointerException(); |
490 |
if (c == this) |
491 |
throw new IllegalArgumentException(); |
492 |
int n = 0; |
493 |
E e; |
494 |
while ( (e = poll()) != null) { |
495 |
c.add(e); |
496 |
++n; |
497 |
} |
498 |
return n; |
499 |
} |
500 |
|
501 |
public int drainTo(Collection<? super E> c, int maxElements) { |
502 |
if (c == null) |
503 |
throw new NullPointerException(); |
504 |
if (c == this) |
505 |
throw new IllegalArgumentException(); |
506 |
int n = 0; |
507 |
E e; |
508 |
while (n < maxElements && (e = poll()) != null) { |
509 |
c.add(e); |
510 |
++n; |
511 |
} |
512 |
return n; |
513 |
} |
514 |
|
515 |
// Traversal-based methods |
516 |
|
517 |
    /**
     * Return head after performing any outstanding helping steps:
     * swings a lagging tail forward and skips an off-list first node
     * before returning, so callers see a settled head.
     */
    private QNode traversalHead() {
        for (;;) {
            QNode t = tail.get();
            QNode h = head.get();
            if (h != null && t != null) {
                QNode last = t.next;
                QNode first = h.next;
                if (t == tail.get()) {          // reads were consistent
                    if (last != null)
                        tail.compareAndSet(t, last); // help advance lagging tail
                    else if (first != null) {
                        Object x = first.get();
                        if (x == first)         // first already matched/off-list
                            advanceHead(h, first);
                        else
                            return h;
                    }
                    else
                        return h;               // queue is empty
                }
            }
        }
    }
543 |
|
544 |
|
545 |
    /**
     * Returns a new iterator over the data elements of this queue;
     * see {@link Itr} for the traversal/caching strategy.
     */
    public Iterator<E> iterator() {
        return new Itr();
    }
548 |
|
549 |
    /**
     * Iterators. Basic strategy is to traverse list, treating
     * non-data (i.e., request) nodes as terminating list.
     * Once a valid data node is found, the item is cached
     * so that the next call to next() will return it even
     * if subsequently removed.
     */
    class Itr implements Iterator<E> {
        QNode nextNode;    // Next node to return item for; null when exhausted
        QNode currentNode; // last returned node, for remove()
        QNode prevNode;    // predecessor of last returned node
        E nextItem;        // Cache of next item, once committed to in next

        Itr() {
            nextNode = traversalHead(); // start at (dummy) head node
            advance();                  // position on first live data node
        }

        /**
         * Advances to the next live data node, caching its item, and
         * returns the previously cached item.
         */
        E advance() {
            prevNode = currentNode;
            currentNode = nextNode;
            E x = nextItem;             // the item committed to on the prior call

            QNode p = nextNode.next;
            for (;;) {
                if (p == null || !p.isData) { // end of the data prefix
                    nextNode = null;
                    nextItem = null;
                    return x;
                }
                Object item = p.get();
                if (item != p && item != null) { // live data node
                    nextNode = p;
                    nextItem = (E)item;
                    return x;
                }
                prevNode = p;           // skip matched/cancelled node
                p = p.next;
            }
        }

        public boolean hasNext() {
            return nextNode != null;
        }

        public E next() {
            if (nextNode == null) throw new NoSuchElementException();
            return advance();
        }

        public void remove() {
            QNode p = currentNode;
            QNode prev = prevNode;
            if (prev == null || p == null)
                throw new IllegalStateException();
            Object x = p.get();
            // Cancel the node by self-linking its item, then unsplice;
            // a failed CAS means it was already taken or removed.
            if (x != null && x != p && p.compareAndSet(x, p))
                clean(prev, p);
        }
    }
609 |
|
610 |
    /**
     * Retrieves, but does not remove, the head data item, or returns
     * null if the queue holds no data (it is empty or its first live
     * node is a consumer request).
     */
    public E peek() {
        for (;;) {
            QNode h = traversalHead();
            QNode p = h.next;
            if (p == null)
                return null;            // empty
            Object x = p.get();
            if (p != x) {               // p not off-list
                if (!p.isData)
                    return null;        // first node is a request, not data
                if (x != null)
                    return (E)x;        // live data item
            }
            // otherwise p was matched/removed concurrently; retry
        }
    }
625 |
|
626 |
    /**
     * Returns true if this queue contains no data elements (it is
     * empty or its first live node is a consumer request).
     */
    public boolean isEmpty() {
        for (;;) {
            QNode h = traversalHead();
            QNode p = h.next;
            if (p == null)
                return true;            // no nodes at all
            Object x = p.get();
            if (p != x) {               // p not off-list
                if (!p.isData)
                    return true;        // only waiting consumers present
                if (x != null)
                    return false;       // live data item found
            }
            // otherwise p was matched/removed concurrently; retry
        }
    }
641 |
|
642 |
    /**
     * Returns true if the first live node in the queue is a consumer
     * request node (i.e., at least one taker is waiting).
     */
    public boolean hasWaitingConsumer() {
        for (;;) {
            QNode h = traversalHead();
            QNode p = h.next;
            if (p == null)
                return false;           // queue empty
            Object x = p.get();
            if (p != x)                 // p not off-list; classify by node mode
                return !p.isData;
            // otherwise p was matched/removed concurrently; retry
        }
    }
653 |
|
654 |
/** |
655 |
* Returns the number of elements in this queue. If this queue |
656 |
* contains more than {@code Integer.MAX_VALUE} elements, returns |
657 |
* {@code Integer.MAX_VALUE}. |
658 |
* |
659 |
* <p>Beware that, unlike in most collections, this method is |
660 |
* <em>NOT</em> a constant-time operation. Because of the |
661 |
* asynchronous nature of these queues, determining the current |
662 |
* number of elements requires an O(n) traversal. |
663 |
* |
664 |
* @return the number of elements in this queue |
665 |
*/ |
666 |
public int size() { |
667 |
int count = 0; |
668 |
QNode h = traversalHead(); |
669 |
for (QNode p = h.next; p != null && p.isData; p = p.next) { |
670 |
Object x = p.get(); |
671 |
if (x != null && x != p) { |
672 |
if (++count == Integer.MAX_VALUE) // saturated |
673 |
break; |
674 |
} |
675 |
} |
676 |
return count; |
677 |
} |
678 |
|
679 |
public int getWaitingConsumerCount() { |
680 |
int count = 0; |
681 |
QNode h = traversalHead(); |
682 |
for (QNode p = h.next; p != null && !p.isData; p = p.next) { |
683 |
if (p.get() == null) { |
684 |
if (++count == Integer.MAX_VALUE) |
685 |
break; |
686 |
} |
687 |
} |
688 |
return count; |
689 |
} |
690 |
|
691 |
    /**
     * Always returns {@code Integer.MAX_VALUE}: this queue is
     * unbounded, so it never runs out of capacity.
     */
    public int remainingCapacity() {
        return Integer.MAX_VALUE;
    }
694 |
|
695 |
/** |
696 |
* Save the state to a stream (that is, serialize it). |
697 |
* |
698 |
* @serialData All of the elements (each an {@code E}) in |
699 |
* the proper order, followed by a null |
700 |
* @param s the stream |
701 |
*/ |
702 |
private void writeObject(java.io.ObjectOutputStream s) |
703 |
throws java.io.IOException { |
704 |
s.defaultWriteObject(); |
705 |
for (Iterator<E> it = iterator(); it.hasNext(); ) |
706 |
s.writeObject(it.next()); |
707 |
// Use trailing null as sentinel |
708 |
s.writeObject(null); |
709 |
} |
710 |
|
711 |
/** |
712 |
* Reconstitute the Queue instance from a stream (that is, |
713 |
* deserialize it). |
714 |
* @param s the stream |
715 |
*/ |
716 |
private void readObject(java.io.ObjectInputStream s) |
717 |
throws java.io.IOException, ClassNotFoundException { |
718 |
s.defaultReadObject(); |
719 |
resetHeadAndTail(); |
720 |
for (;;) { |
721 |
E item = (E)s.readObject(); |
722 |
if (item == null) |
723 |
break; |
724 |
else |
725 |
offer(item); |
726 |
} |
727 |
} |
728 |
|
729 |
|
730 |
    // Support for resetting head/tail while deserializing

    /**
     * Installs fresh head/tail references around a new dummy node and
     * clears cleanMe. The fields are final, so they are written via
     * Unsafe.putObjectVolatile from within readObject.
     */
    private void resetHeadAndTail() {
        QNode dummy = new QNode(null, false);
        _unsafe.putObjectVolatile(this, headOffset,
                                  new PaddedAtomicReference<QNode>(dummy));
        _unsafe.putObjectVolatile(this, tailOffset,
                                  new PaddedAtomicReference<QNode>(dummy));
        _unsafe.putObjectVolatile(this, cleanMeOffset,
                                  new PaddedAtomicReference<QNode>(null));
    }
740 |
|
741 |
    // Temporary Unsafe mechanics for preliminary release

    /**
     * Obtains a sun.misc.Unsafe instance: directly when permitted,
     * otherwise via a privileged reflective read of the
     * Unsafe.theUnsafe field.
     */
    private static Unsafe getUnsafe() throws Throwable {
        try {
            return Unsafe.getUnsafe();
        } catch (SecurityException se) {
            try {
                return java.security.AccessController.doPrivileged
                    (new java.security.PrivilegedExceptionAction<Unsafe>() {
                        public Unsafe run() throws Exception {
                            return getUnsafePrivileged();
                        }});
            } catch (java.security.PrivilegedActionException e) {
                throw e.getCause(); // unwrap to the underlying failure
            }
        }
    }
757 |
|
758 |
private static Unsafe getUnsafePrivileged() |
759 |
throws NoSuchFieldException, IllegalAccessException { |
760 |
Field f = Unsafe.class.getDeclaredField("theUnsafe"); |
761 |
f.setAccessible(true); |
762 |
return (Unsafe) f.get(null); |
763 |
} |
764 |
|
765 |
private static long fieldOffset(String fieldName) |
766 |
throws NoSuchFieldException { |
767 |
return _unsafe.objectFieldOffset |
768 |
(LinkedTransferQueue.class.getDeclaredField(fieldName)); |
769 |
} |
770 |
|
771 |
    // Cached Unsafe handle and field offsets used by resetHeadAndTail
    // to reinstall the final head/tail/cleanMe fields during
    // deserialization.
    private static final Unsafe _unsafe;
    private static final long headOffset;
    private static final long tailOffset;
    private static final long cleanMeOffset;
    static {
        try {
            _unsafe = getUnsafe();
            headOffset = fieldOffset("head");
            tailOffset = fieldOffset("tail");
            cleanMeOffset = fieldOffset("cleanMe");
        } catch (Throwable e) {
            // Fail class initialization loudly if Unsafe is unavailable.
            throw new RuntimeException("Could not initialize intrinsics", e);
        }
    }
785 |
|
786 |
} |