/*
 * Written by Doug Lea, Bill Scherer, and Michael Scott with
 * assistance from members of JCP JSR-166 Expert Group and released to
 * the public domain, as explained at
 * http://creativecommons.org/licenses/publicdomain
 */

package java.util.concurrent;
import java.util.concurrent.locks.*;
import java.util.concurrent.atomic.*;
import java.util.*;

/**
 * A {@linkplain BlockingQueue blocking queue} in which each insert
 * operation must wait for a corresponding remove operation by another
 * thread, and vice versa. A synchronous queue does not have any
 * internal capacity, not even a capacity of one. You cannot
 * <tt>peek</tt> at a synchronous queue because an element is only
 * present when you try to remove it; you cannot insert an element
 * (using any method) unless another thread is trying to remove it;
 * you cannot iterate as there is nothing to iterate. The
 * <em>head</em> of the queue is the element that the first queued
 * inserting thread is trying to add to the queue; if there is no such
 * queued thread then no element is available for removal and
 * <tt>poll()</tt> will return <tt>null</tt>. For purposes of other
 * <tt>Collection</tt> methods (for example <tt>contains</tt>), a
 * <tt>SynchronousQueue</tt> acts as an empty collection. This queue
 * does not permit <tt>null</tt> elements.
 *
 * <p>Synchronous queues are similar to rendezvous channels used in
 * CSP and Ada. They are well suited for handoff designs, in which an
 * object running in one thread must sync up with an object running
 * in another thread in order to hand it some information, event, or
 * task.
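 *
 * <p>For example, in a hypothetical handoff the producing thread
 * blocks in <tt>put</tt> until a consuming thread arrives at
 * <tt>take</tt> (the names below are illustrative only):
 *
 * <pre>
 * final SynchronousQueue&lt;String&gt; channel =
 *     new SynchronousQueue&lt;String&gt;();
 *
 * new Thread(new Runnable() {           // producer
 *     public void run() {
 *         try {
 *             channel.put("work item"); // waits for a taker
 *         } catch (InterruptedException ex) {
 *             Thread.currentThread().interrupt();
 *         }
 *     }
 * }).start();
 *
 * String item = channel.take();         // waits for a putter
 * </pre>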
 *
 * <p>This class supports an optional fairness policy for ordering
 * waiting producer and consumer threads. By default, this ordering
 * is not guaranteed. However, a queue constructed with fairness set
 * to <tt>true</tt> grants threads access in FIFO order.
 *
 * <p>This class and its iterator implement all of the
 * <em>optional</em> methods of the {@link Collection} and {@link
 * Iterator} interfaces.
 *
 * <p>This class is a member of the
 * <a href="{@docRoot}/../guide/collections/index.html">
 * Java Collections Framework</a>.
 *
 * @since 1.5
 * @author Doug Lea and Bill Scherer and Michael Scott
 * @param <E> the type of elements held in this collection
 */
public class SynchronousQueue<E> extends AbstractQueue<E>
        implements BlockingQueue<E>, java.io.Serializable {
    private static final long serialVersionUID = -3223113410248163686L;

    /*
     * This class implements extensions of the dual stack and dual
     * queue algorithms described in "Nonblocking Concurrent Objects
     * with Condition Synchronization", by W. N. Scherer III and
     * M. L. Scott. 18th Annual Conf. on Distributed Computing,
     * Oct. 2004 (see also
     * http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/duals.html).
     * The (Lifo) stack is used for non-fair mode, and the (Fifo)
     * queue for fair mode. The performance of the two is generally
     * similar. Fifo usually supports higher throughput under
     * contention but Lifo maintains higher thread locality in common
     * applications.
     *
     * A dual queue (and similarly stack) is one that at any given
     * time either holds "data" -- items provided by put operations,
     * or "requests" -- slots representing take operations, or is
     * empty. A call to "fulfill" (i.e., a call requesting an item
     * from a queue holding data or vice versa) dequeues a
     * complementary node. The most interesting feature of these
     * queues is that any operation can figure out which mode the
     * queue is in, and act accordingly without needing locks.
     *
     * Both the queue and stack extend abstract class Transferer
     * defining the single method transfer that does a put or a
     * take. These are unified into a single method because in dual
     * data structures, the put and take operations are symmetrical,
     * so nearly all code can be combined. The resulting transfer
     * methods are on the long side, but are easier to follow than
     * they would be if broken up into nearly-duplicated parts.
     *
     * The queue and stack data structures share many conceptual
     * similarities but very few concrete details. For simplicity,
     * they are kept distinct so that they can later evolve
     * separately.
     *
     * The algorithms here differ from the versions in the above paper
     * in extending them for use in synchronous queues, as well as
     * dealing with cancellation. The main differences include:
     *
     *  1. The original algorithms used bit-marked pointers, but
     *     the ones here use mode bits in nodes, leading to a number
     *     of further adaptations.
     *  2. SynchronousQueues must block threads waiting to become
     *     fulfilled.
     *  3. Support for cancellation via timeout and interrupts,
     *     including cleaning out cancelled nodes/threads
     *     from lists to avoid garbage retention and memory depletion.
     *
     * Blocking is mainly accomplished using LockSupport park/unpark,
     * except that nodes that appear to be the next ones to become
     * fulfilled first spin a bit (on multiprocessors only). On very
     * busy synchronous queues, spinning can dramatically improve
     * throughput. And on less busy ones, the amount of spinning is
     * small enough not to be noticeable.
     *
     * Cleaning is done in different ways in queues vs stacks. For
     * queues, we can almost always remove a node immediately in O(1)
     * time (modulo retries for consistency checks) when it is
     * cancelled. But if it may be pinned as the current tail, it must
     * wait until some subsequent cancellation. For stacks, we need a
     * potentially O(n) traversal to be sure that we can remove the
     * node, but this can run concurrently with other threads
     * accessing the stack.
     *
     * While garbage collection takes care of most node reclamation
     * issues that otherwise complicate nonblocking algorithms, care
     * is taken to "forget" references to data, other nodes, and
     * threads that might be held on to long-term by blocked
     * threads. In cases where setting to null would otherwise
     * conflict with main algorithms, this is done by changing a
     * node's link to now point to the node itself. This doesn't arise
     * much for Stack nodes (because blocked threads do not hang on to
     * old head pointers), but references in Queue nodes must be
     * aggressively forgotten to avoid reachability of everything any
     * node has ever referred to since arrival.
     */

    /**
     * Shared internal API for dual stacks and queues.
     */
    static abstract class Transferer {
        /**
         * Performs a put or take.
         *
         * @param e if non-null, the item to be handed to a consumer;
         *          if null, requests that transfer return an item
         *          offered by a producer.
         * @param timed if this operation should time out
         * @param nanos the timeout, in nanoseconds
         * @return if non-null, the item provided or received; if null,
         *         the operation failed due to timeout or interrupt --
         *         the caller can distinguish which of these occurred
         *         by checking Thread.interrupted.
         */
        abstract Object transfer(Object e, boolean timed, long nanos);
    }

    /** The number of CPUs, for spin control */
    static final int NCPUS = Runtime.getRuntime().availableProcessors();

    /**
     * The number of times to spin before blocking in timed waits.
     * The value is empirically derived -- it works well across a
     * variety of processors and OSes. Empirically, the best value
     * seems not to vary with number of CPUs (beyond 2) so is just
     * a constant.
     */
    static final int maxTimedSpins = (NCPUS < 2) ? 0 : 32;

    /**
     * The number of times to spin before blocking in untimed
     * waits. This is greater than the timed value because untimed
     * waits spin faster since they don't need to check times on
     * each spin.
     */
    static final int maxUntimedSpins = maxTimedSpins * 16;

    /**
     * The number of nanoseconds for which it is faster to spin
     * rather than to use timed park. A rough estimate suffices.
     */
    static final long spinForTimeoutThreshold = 1000L;

    /** Dual stack */
    static final class TransferStack extends Transferer {
        /*
         * This extends Scherer-Scott dual stack algorithm, differing,
         * among other ways, by using "covering" nodes rather than
         * bit-marked pointers: Fulfilling operations push on marker
         * nodes (with FULFILLING bit set in mode) to reserve a spot
         * to match a waiting node.
         */

        /* Modes for SNodes, ORed together in node fields */
        /** Node represents an unfulfilled consumer */
        static final int REQUEST    = 0;
        /** Node represents an unfulfilled producer */
        static final int DATA       = 1;
        /** Node is fulfilling another unfulfilled DATA or REQUEST */
        static final int FULFILLING = 2;

        /** Returns true if m has the fulfilling bit set. */
        static boolean isFulfilling(int m) { return (m & FULFILLING) != 0; }

        /** Node class for TransferStacks. */
        static final class SNode {
            volatile SNode next;        // next node in stack
            volatile SNode match;       // the node matched to this
            volatile Thread waiter;     // to control park/unpark
            Object item;                // data; or null for REQUESTs
            int mode;
            // Note: item and mode fields don't need to be volatile
            // since they are always written before, and read after,
            // other volatile/atomic operations.

            SNode(Object item) {
                this.item = item;
            }

            static final AtomicReferenceFieldUpdater<SNode, SNode>
                nextUpdater = AtomicReferenceFieldUpdater.newUpdater
                (SNode.class, SNode.class, "next");

            boolean casNext(SNode cmp, SNode val) {
                return (cmp == next &&
                        nextUpdater.compareAndSet(this, cmp, val));
            }

            static final AtomicReferenceFieldUpdater<SNode, SNode>
                matchUpdater = AtomicReferenceFieldUpdater.newUpdater
                (SNode.class, SNode.class, "match");

            /**
             * Tries to match node s to this node, waking up the
             * waiting thread if successful. Fulfillers call tryMatch
             * to identify their waiters. Waiters block until they
             * have been matched.
             * @param s the node to match
             * @return true if successfully matched to s
             */
            boolean tryMatch(SNode s) {
                if (match == null &&
                    matchUpdater.compareAndSet(this, null, s)) {
                    Thread w = waiter;
                    if (w != null) {    // waiters need at most one unpark
                        waiter = null;
                        LockSupport.unpark(w);
                    }
                    return true;
                }
                return match == s;
            }

            /**
             * Tries to cancel a wait by matching node to itself.
             */
            void tryCancel() {
                matchUpdater.compareAndSet(this, null, this);
            }

            boolean isCancelled() {
                return match == this;
            }
        }

        /** The head (top) of the stack */
        volatile SNode head;

        static final AtomicReferenceFieldUpdater<TransferStack, SNode>
            headUpdater = AtomicReferenceFieldUpdater.newUpdater
            (TransferStack.class, SNode.class, "head");

        boolean casHead(SNode h, SNode nh) {
            return h == head && headUpdater.compareAndSet(this, h, nh);
        }

        /**
         * Creates or resets fields of a node. Called only from transfer
         * where the node to push on stack is lazily created and
         * reused when possible to help reduce intervals between reads
         * and CASes of head and to avoid surges of garbage when CASes
         * to push nodes fail due to contention.
         */
        static SNode snode(SNode s, Object e, SNode next, int mode) {
            if (s == null) s = new SNode(e);
            s.mode = mode;
            s.next = next;
            return s;
        }

        /**
         * Puts or takes an item.
         */
        Object transfer(Object e, boolean timed, long nanos) {
            /*
             * Basic algorithm is to loop trying one of three actions:
             *
             * 1. If apparently empty or already containing nodes of same
             *    mode, try to push node on stack and wait for a match,
             *    returning it, or null if cancelled.
             *
             * 2. If apparently containing node of complementary mode,
             *    try to push a fulfilling node on to stack, match
             *    with corresponding waiting node, pop both from
             *    stack, and return matched item. The matching or
             *    unlinking might not actually be necessary because of
             *    other threads performing action 3:
             *
             * 3. If top of stack already holds another fulfilling node,
             *    help it out by doing its match and/or pop
             *    operations, and then continue. The code for helping
             *    is essentially the same as for fulfilling, except
             *    that it doesn't return the item.
             */

            SNode s = null; // constructed/reused as needed
            int mode = (e == null) ? REQUEST : DATA;

            for (;;) {
                SNode h = head;
                if (h == null || h.mode == mode) {  // empty or same-mode
                    if (timed && nanos <= 0) {      // can't wait
                        if (h != null && h.isCancelled())
                            casHead(h, h.next);     // pop cancelled node
                        else
                            return null;
                    } else if (casHead(h, s = snode(s, e, h, mode))) {
                        SNode m = awaitFulfill(s, timed, nanos);
                        if (m == s) {               // wait was cancelled
                            clean(s);
                            return null;
                        }
                        if ((h = head) != null && h.next == s)
                            casHead(h, s.next);     // help s's fulfiller
                        return mode == REQUEST ? m.item : s.item;
                    }
                } else if (!isFulfilling(h.mode)) { // try to fulfill
                    if (h.isCancelled())            // already cancelled
                        casHead(h, h.next);         // pop and retry
                    else if (casHead(h, s = snode(s, e, h, FULFILLING|mode))) {
                        for (;;) { // loop until matched or waiters disappear
                            SNode m = s.next;       // m is s's match
                            if (m == null) {        // all waiters are gone
                                casHead(s, null);   // pop fulfill node
                                s = null;           // use new node next time
                                break;              // restart main loop
                            }
                            SNode mn = m.next;
                            if (m.tryMatch(s)) {
                                casHead(s, mn);     // pop both s and m
                                return (mode == REQUEST) ? m.item : s.item;
                            } else                  // lost match
                                s.casNext(m, mn);   // help unlink
                        }
                    }
                } else {                            // help a fulfiller
                    SNode m = h.next;               // m is h's match
                    if (m == null)                  // waiter is gone
                        casHead(h, null);           // pop fulfilling node
                    else {
                        SNode mn = m.next;
                        if (m.tryMatch(h))          // help match
                            casHead(h, mn);         // pop both h and m
                        else                        // lost match
                            h.casNext(m, mn);       // help unlink
                    }
                }
            }
        }

        /**
         * Spins/blocks until node s is matched by a fulfill operation.
         * @param s the waiting node
         * @param timed true if timed wait
         * @param nanos timeout value
         * @return matched node, or s if cancelled
         */
        SNode awaitFulfill(SNode s, boolean timed, long nanos) {
            /*
             * When a node/thread is about to block, it sets its waiter
             * field and then rechecks state at least one more time
             * before actually parking, thus covering the race vs the
             * fulfiller noticing that waiter is non-null so should be
             * woken.
             *
             * When invoked by nodes that appear at the point of call
             * to be at the head of the stack, calls to park are
             * preceded by spins to avoid blocking when producers and
             * consumers are arriving very close in time. This can
             * happen enough to bother only on multiprocessors.
             *
             * The order of checks for returning out of the main loop
             * reflects the fact that interrupts have precedence over
             * normal returns, which have precedence over
             * timeouts. (So, on timeout, one last check for match is
             * done before giving up.) Except that calls from untimed
             * SynchronousQueue.{poll/offer} don't check interrupts
             * and don't wait at all, so are trapped in the transfer
             * method rather than calling awaitFulfill.
             */
            long lastTime = (timed) ? System.nanoTime() : 0;
            Thread w = Thread.currentThread();
            SNode h = head;
            int spins = (shouldSpin(s) ?
                         (timed ? maxTimedSpins : maxUntimedSpins) : 0);
            for (;;) {
                if (w.isInterrupted())
                    s.tryCancel();
                SNode m = s.match;
                if (m != null)
                    return m;
                if (timed) {
                    long now = System.nanoTime();
                    nanos -= now - lastTime;
                    lastTime = now;
                    if (nanos <= 0) {
                        s.tryCancel();
                        continue;
                    }
                }
                if (spins > 0)
                    spins = shouldSpin(s) ? (spins - 1) : 0;
                else if (s.waiter == null)
                    s.waiter = w; // establish waiter so can park next iter
                else if (!timed)
                    LockSupport.park(this);
                else if (nanos > spinForTimeoutThreshold)
                    LockSupport.parkNanos(this, nanos);
            }
        }

        /**
         * Returns true if node s is at head or there is an active
         * fulfiller.
         */
        boolean shouldSpin(SNode s) {
            SNode h = head;
            return (h == s || h == null || isFulfilling(h.mode));
        }

        /**
         * Unlinks s from the stack.
         */
        void clean(SNode s) {
            s.item = null;   // forget item
            s.waiter = null; // forget thread

            /*
             * At worst we may need to traverse the entire stack to
             * unlink s. If there are multiple concurrent calls to
             * clean, we might not see s if another thread has already
             * removed it. But we can stop when we see any node known
             * to follow s. We use s.next unless it too is cancelled,
             * in which case we try the node one past. We don't check
             * any further because we don't want to doubly traverse
             * just to find the sentinel.
             */

            SNode past = s.next;
            if (past != null && past.isCancelled())
                past = past.next;

            // Absorb cancelled nodes at head
            SNode p;
            while ((p = head) != null && p != past && p.isCancelled())
                casHead(p, p.next);

            // Unsplice embedded nodes
            while (p != null && p != past) {
                SNode n = p.next;
                if (n != null && n.isCancelled())
                    p.casNext(n, n.next);
                else
                    p = n;
            }
        }
    }

    /** Dual Queue. */
    static final class TransferQueue extends Transferer {
        /*
         * This extends Scherer-Scott dual queue algorithm, differing,
         * among other ways, by using modes within nodes rather than
         * marked pointers. The algorithm is a little simpler than
         * that for stacks because fulfillers do not need explicit
         * nodes, and matching is done by CAS'ing QNode.item field
         * from non-null to null (for put) or vice versa (for take).
         */

        /** Node class for TransferQueue. */
        static final class QNode {
            volatile QNode next;          // next node in queue
            volatile Object item;         // CAS'ed to or from null
            volatile Thread waiter;       // to control park/unpark
            final boolean isData;

            QNode(Object item, boolean isData) {
                this.item = item;
                this.isData = isData;
            }

            static final AtomicReferenceFieldUpdater<QNode, QNode>
                nextUpdater = AtomicReferenceFieldUpdater.newUpdater
                (QNode.class, QNode.class, "next");

            boolean casNext(QNode cmp, QNode val) {
                return (next == cmp &&
                        nextUpdater.compareAndSet(this, cmp, val));
            }

            static final AtomicReferenceFieldUpdater<QNode, Object>
                itemUpdater = AtomicReferenceFieldUpdater.newUpdater
                (QNode.class, Object.class, "item");

            boolean casItem(Object cmp, Object val) {
                return (item == cmp &&
                        itemUpdater.compareAndSet(this, cmp, val));
            }

            /**
             * Tries to cancel by CAS'ing a ref to this as item.
             */
            void tryCancel(Object cmp) {
                itemUpdater.compareAndSet(this, cmp, this);
            }

            boolean isCancelled() {
                return item == this;
            }

            /**
             * Returns true if this node is known to be off the queue
             * because its next pointer has been forgotten due to
             * an advanceHead operation.
             */
            boolean isOffList() {
                return next == this;
            }
        }

        /** Head of queue */
        transient volatile QNode head;
        /** Tail of queue */
        transient volatile QNode tail;
        /**
         * Reference to a cancelled node that might not yet have been
         * unlinked from the queue because it was the last inserted
         * node when it was cancelled.
         */
        transient volatile QNode cleanMe;

        TransferQueue() {
            QNode h = new QNode(null, false); // initialize to dummy node
            head = h;
            tail = h;
        }

        static final AtomicReferenceFieldUpdater<TransferQueue, QNode>
            headUpdater = AtomicReferenceFieldUpdater.newUpdater
            (TransferQueue.class, QNode.class, "head");

        /**
         * Tries to cas nh as the new head; if successful, unlinks
         * the old head's next node to avoid garbage retention.
         */
        void advanceHead(QNode h, QNode nh) {
            if (h == head && headUpdater.compareAndSet(this, h, nh))
                h.next = h; // forget old next
        }

        static final AtomicReferenceFieldUpdater<TransferQueue, QNode>
            tailUpdater = AtomicReferenceFieldUpdater.newUpdater
            (TransferQueue.class, QNode.class, "tail");

        /**
         * Tries to cas nt as the new tail.
         */
        void advanceTail(QNode t, QNode nt) {
            if (tail == t)
                tailUpdater.compareAndSet(this, t, nt);
        }

        static final AtomicReferenceFieldUpdater<TransferQueue, QNode>
            cleanMeUpdater = AtomicReferenceFieldUpdater.newUpdater
            (TransferQueue.class, QNode.class, "cleanMe");

        /**
         * Tries to CAS the cleanMe slot.
         */
        boolean casCleanMe(QNode cmp, QNode val) {
            return (cleanMe == cmp &&
                    cleanMeUpdater.compareAndSet(this, cmp, val));
        }

        /**
         * Puts or takes an item.
         */
        Object transfer(Object e, boolean timed, long nanos) {
            /* Basic algorithm is to loop trying to take either of
             * two actions:
             *
             * 1. If queue apparently empty or holding same-mode nodes,
             *    try to add node to queue of waiters, wait to be
             *    fulfilled (or cancelled) and return matching item.
             *
             * 2. If queue apparently contains waiting items, and this
             *    call is of complementary mode, try to fulfill by CAS'ing
             *    item field of waiting node and dequeuing it, and then
             *    returning matching item.
             *
             * In each case, along the way, check for and try to help
             * advance head and tail on behalf of other stalled/slow
             * threads.
             *
             * The loop starts off with a null check guarding against
             * seeing uninitialized head or tail values. This never
             * happens in current SynchronousQueue, but could if
             * callers held non-volatile/final ref to the
             * transferer. The check is here anyway because it places
             * null checks at top of loop, which is usually faster
             * than having them implicitly interspersed.
             */

            QNode s = null; // constructed/reused as needed
            boolean isData = (e != null);

            for (;;) {
                QNode t = tail;
                QNode h = head;
                if (t == null || h == null)         // saw uninitialized values
                    continue;                       // spin

                if (h == t || t.isData == isData) { // empty or same-mode
                    QNode tn = t.next;
                    if (t != tail)                  // inconsistent read
                        continue;
                    if (tn != null) {               // lagging tail
                        advanceTail(t, tn);
                        continue;
                    }
                    if (timed && nanos <= 0)        // can't wait
                        return null;
                    if (s == null)
                        s = new QNode(e, isData);
                    if (!t.casNext(null, s))        // failed to link in
                        continue;

                    advanceTail(t, s);              // swing tail and wait
                    Object x = awaitFulfill(s, e, timed, nanos);
                    if (x == s) {                   // wait was cancelled
                        clean(t, s);
                        return null;
                    }

                    if (!s.isOffList()) {           // not already unlinked
                        advanceHead(t, s);          // unlink if head
                        if (x != null)              // and forget fields
                            s.item = s;
                        s.waiter = null;
                    }
                    return (x != null) ? x : e;

                } else {                            // complementary-mode
                    QNode m = h.next;               // node to fulfill
                    if (t != tail || m == null || h != head)
                        continue;                   // inconsistent read

                    Object x = m.item;
                    if (isData == (x != null) ||    // m already fulfilled
                        x == m ||                   // m cancelled
                        !m.casItem(x, e)) {         // lost CAS
                        advanceHead(h, m);          // dequeue and retry
                        continue;
                    }

                    advanceHead(h, m);              // successfully fulfilled
                    LockSupport.unpark(m.waiter);
                    return (x != null) ? x : e;
                }
            }
        }

        /**
         * Spins/blocks until node s is fulfilled.
         * @param s the waiting node
         * @param e the comparison value for checking match
         * @param timed true if timed wait
         * @param nanos timeout value
         * @return matched item, or s if cancelled
         */
        Object awaitFulfill(QNode s, Object e, boolean timed, long nanos) {
            /* Same idea as TransferStack.awaitFulfill */
            long lastTime = (timed) ? System.nanoTime() : 0;
            Thread w = Thread.currentThread();
            int spins = ((head.next == s) ?
                         (timed ? maxTimedSpins : maxUntimedSpins) : 0);
            for (;;) {
                if (w.isInterrupted())
                    s.tryCancel(e);
                Object x = s.item;
                if (x != e)
                    return x;
                if (timed) {
                    long now = System.nanoTime();
                    nanos -= now - lastTime;
                    lastTime = now;
                    if (nanos <= 0) {
                        s.tryCancel(e);
                        continue;
                    }
                }
                if (spins > 0)
                    --spins;
                else if (s.waiter == null)
                    s.waiter = w;
                else if (!timed)
                    LockSupport.park(this);
                else if (nanos > spinForTimeoutThreshold)
                    LockSupport.parkNanos(this, nanos);
            }
        }

        /**
         * Gets rid of cancelled node s with original predecessor pred.
         */
        void clean(QNode pred, QNode s) {
            s.waiter = null; // forget thread
            /*
             * At any given time, exactly one node on list cannot be
             * deleted -- the last inserted node. To accommodate this,
             * if we cannot delete s, we save its predecessor as
             * "cleanMe", deleting the previously saved version
             * first. At least one of node s or the node previously
             * saved can always be deleted, so this always terminates.
             */
            while (pred.next == s) { // Return early if already unlinked
                QNode h = head;
                QNode hn = h.next;   // Absorb cancelled first node as head
                if (hn != null && hn.isCancelled()) {
                    advanceHead(h, hn);
                    continue;
                }
                QNode t = tail;      // Ensure consistent read for tail
                if (t == h)
                    return;
                QNode tn = t.next;
                if (t != tail)
                    continue;
                if (tn != null) {
                    advanceTail(t, tn);
                    continue;
                }
                if (s != t) {        // If not tail, try to unsplice
                    QNode sn = s.next;
                    if (sn == s || pred.casNext(s, sn))
                        return;
                }
                QNode dp = cleanMe;
                if (dp != null) {    // Try unlinking previous cancelled node
                    QNode d = dp.next;
                    QNode dn;
                    if (d == null ||               // d is gone or
                        d == dp ||                 // d is off list or
                        !d.isCancelled() ||        // d not cancelled or
                        (d != t &&                 // d not tail and
                         (dn = d.next) != null &&  //   has successor
                         dn != d &&                //   that is on list
                         dp.casNext(d, dn)))       // d unspliced
                        casCleanMe(dp, null);
                    if (dp == pred)
                        return;      // s is already saved node
                } else if (casCleanMe(null, pred))
                    return;          // Postpone cleaning s
            }
        }
    }

    /**
     * The transferer. Set only in constructor, but cannot be declared
     * as final without further complicating serialization. Since
     * this is accessed only at most once per public method, there
     * isn't a noticeable performance penalty for using volatile
     * instead of final here.
     */
    private transient volatile Transferer transferer;

    /**
     * Creates a <tt>SynchronousQueue</tt> with nonfair access policy.
     */
    public SynchronousQueue() {
        this(false);
    }

    /**
     * Creates a <tt>SynchronousQueue</tt> with the specified fairness policy.
     * @param fair if true, waiting threads contend in FIFO order for access;
     *        otherwise the order is unspecified.
     */
    public SynchronousQueue(boolean fair) {
        transferer = (fair) ? new TransferQueue() : new TransferStack();
    }

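    /*
     * Note: each blocking collection operation below reduces to a single
     * Transferer.transfer call: put(e) is transfer(e, false, 0), take()
     * is transfer(null, false, 0), offer(e) and poll() pass timed=true
     * with a zero timeout, and the timed offer/poll variants pass the
     * caller's remaining nanoseconds.
     */
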
    /**
     * Adds the specified element to this queue, waiting if necessary for
     * another thread to receive it.
     *
     * @throws InterruptedException {@inheritDoc}
     * @throws NullPointerException {@inheritDoc}
     */
    public void put(E o) throws InterruptedException {
        if (o == null) throw new NullPointerException();
        if (transferer.transfer(o, false, 0) == null)
            throw new InterruptedException();
    }

    /**
     * Inserts the specified element into this queue, waiting if necessary
     * up to the specified wait time for another thread to receive it.
     *
     * @return <tt>true</tt> if successful, or <tt>false</tt> if the
     *         specified waiting time elapses before a consumer appears.
     * @throws InterruptedException {@inheritDoc}
     * @throws NullPointerException {@inheritDoc}
     */
    public boolean offer(E o, long timeout, TimeUnit unit)
        throws InterruptedException {
        if (o == null) throw new NullPointerException();
        if (transferer.transfer(o, true, unit.toNanos(timeout)) != null)
            return true;
        if (!Thread.interrupted())
            return false;
        throw new InterruptedException();
    }

    /**
     * Inserts the specified element into this queue, if another thread is
     * waiting to receive it.
     *
     * @param e the element to add
     * @return <tt>true</tt> if the element was added to this queue, else
     *         <tt>false</tt>
     * @throws NullPointerException if the specified element is null
     */
    public boolean offer(E e) {
        if (e == null) throw new NullPointerException();
        return transferer.transfer(e, true, 0) != null;
    }

    /**
     * Retrieves and removes the head of this queue, waiting if necessary
     * for another thread to insert it.
     *
     * @return the head of this queue
     * @throws InterruptedException {@inheritDoc}
     */
    public E take() throws InterruptedException {
        Object e = transferer.transfer(null, false, 0);
        if (e != null)
            return (E)e;
        throw new InterruptedException();
    }

    /**
     * Retrieves and removes the head of this queue, waiting
     * if necessary up to the specified wait time, for another thread
     * to insert it.
     *
     * @return the head of this queue, or <tt>null</tt> if the
     *         specified waiting time elapses before an element is present.
     * @throws InterruptedException {@inheritDoc}
     */
    public E poll(long timeout, TimeUnit unit) throws InterruptedException {
        Object e = transferer.transfer(null, true, unit.toNanos(timeout));
        if (e != null || !Thread.interrupted())
            return (E)e;
        throw new InterruptedException();
    }

    /**
     * Retrieves and removes the head of this queue, if another thread
     * is currently making an element available.
     *
     * @return the head of this queue, or <tt>null</tt> if no
     *         element is available.
     */
    public E poll() {
        return (E)transferer.transfer(null, true, 0);
    }

    /**
     * Always returns <tt>true</tt>.
     * A <tt>SynchronousQueue</tt> has no internal capacity.
     * @return <tt>true</tt>
     */
    public boolean isEmpty() {
        return true;
    }

    /**
     * Always returns zero.
     * A <tt>SynchronousQueue</tt> has no internal capacity.
     * @return zero
     */
    public int size() {
        return 0;
    }

    /**
     * Always returns zero.
     * A <tt>SynchronousQueue</tt> has no internal capacity.
     * @return zero
     */
    public int remainingCapacity() {
        return 0;
    }

    /**
     * Does nothing.
     * A <tt>SynchronousQueue</tt> has no internal capacity.
     */
    public void clear() {
    }

    /**
     * Always returns <tt>false</tt>.
     * A <tt>SynchronousQueue</tt> has no internal capacity.
     * @param o the element
     * @return <tt>false</tt>
     */
    public boolean contains(Object o) {
        return false;
    }

    /**
     * Always returns <tt>false</tt>.
     * A <tt>SynchronousQueue</tt> has no internal capacity.
     *
     * @param o the element to remove
     * @return <tt>false</tt>
     */
    public boolean remove(Object o) {
        return false;
    }

    /**
     * Returns <tt>false</tt> unless the given collection is empty.
     * A <tt>SynchronousQueue</tt> has no internal capacity.
     * @param c the collection
     * @return <tt>false</tt> unless the given collection is empty
     */
    public boolean containsAll(Collection<?> c) {
        return c.isEmpty();
    }

    /**
     * Always returns <tt>false</tt>.
     * A <tt>SynchronousQueue</tt> has no internal capacity.
     * @param c the collection
     * @return <tt>false</tt>
     */
    public boolean removeAll(Collection<?> c) {
        return false;
    }

    /**
     * Always returns <tt>false</tt>.
     * A <tt>SynchronousQueue</tt> has no internal capacity.
     * @param c the collection
     * @return <tt>false</tt>
     */
    public boolean retainAll(Collection<?> c) {
        return false;
    }

    /**
     * Always returns <tt>null</tt>.
     * A <tt>SynchronousQueue</tt> does not return elements
     * unless actively waited on.
     * @return <tt>null</tt>
     */
    public E peek() {
        return null;
    }

    static class EmptyIterator<E> implements Iterator<E> {
        public boolean hasNext() {
            return false;
        }
        public E next() {
            throw new NoSuchElementException();
        }
        public void remove() {
            throw new IllegalStateException();
        }
    }

    /**
     * Returns an empty iterator in which <tt>hasNext</tt> always returns
     * <tt>false</tt>.
     *
     * @return an empty iterator
     */
    public Iterator<E> iterator() {
        return new EmptyIterator<E>();
    }

    /**
     * Returns a zero-length array.
     * @return a zero-length array
     */
    public Object[] toArray() {
        return new Object[0];
    }

    /**
     * Sets the zeroth element of the specified array to <tt>null</tt>
     * (if the array has non-zero length) and returns it.
     *
     * @param a the array
     * @return the specified array
     * @throws NullPointerException if the specified array is null
     */
    public <T> T[] toArray(T[] a) {
        if (a.length > 0)
            a[0] = null;
        return a;
    }

    /**
     * @throws UnsupportedOperationException {@inheritDoc}
     * @throws ClassCastException {@inheritDoc}
     * @throws NullPointerException {@inheritDoc}
     * @throws IllegalArgumentException {@inheritDoc}
     */
    public int drainTo(Collection<? super E> c) {
        if (c == null)
            throw new NullPointerException();
        if (c == this)
            throw new IllegalArgumentException();
        int n = 0;
        E e;
        while ((e = poll()) != null) {
            c.add(e);
            ++n;
        }
        return n;
    }

    /**
     * @throws UnsupportedOperationException {@inheritDoc}
     * @throws ClassCastException {@inheritDoc}
     * @throws NullPointerException {@inheritDoc}
     * @throws IllegalArgumentException {@inheritDoc}
     */
    public int drainTo(Collection<? super E> c, int maxElements) {
        if (c == null)
            throw new NullPointerException();
        if (c == this)
            throw new IllegalArgumentException();
        int n = 0;
        E e;
        while (n < maxElements && (e = poll()) != null) {
            c.add(e);
            ++n;
        }
        return n;
    }

    /*
     * To cope with the serialization strategy in the 1.5 version of
     * SynchronousQueue, we declare some unused classes and fields
     * that exist solely to enable serializability across versions.
     * These fields are never used, so are initialized only if this
     * object is ever serialized or deserialized.
     */

    static class WaitQueue implements java.io.Serializable { }
    static class LifoWaitQueue extends WaitQueue {
        private static final long serialVersionUID = -3633113410248163686L;
    }
    static class FifoWaitQueue extends WaitQueue {
        private static final long serialVersionUID = -3623113410248163686L;
    }
    private ReentrantLock qlock;
    private WaitQueue waitingProducers;
    private WaitQueue waitingConsumers;

    /**
     * Saves the state to a stream (that is, serializes it).
     *
     * @param s the stream
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        boolean fair = transferer instanceof TransferQueue;
        if (fair) {
            qlock = new ReentrantLock(true);
            waitingProducers = new FifoWaitQueue();
            waitingConsumers = new FifoWaitQueue();
        }
        else {
            qlock = new ReentrantLock();
            waitingProducers = new LifoWaitQueue();
            waitingConsumers = new LifoWaitQueue();
        }
        s.defaultWriteObject();
    }

    private void readObject(final java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        s.defaultReadObject();
        if (waitingProducers instanceof FifoWaitQueue)
            transferer = new TransferQueue();
        else
            transferer = new TransferStack();
    }

}