5 |     */
6 |
7 |    package jsr166y;
8 +
9 |    import java.util.concurrent.*;
9 –    import java.util.concurrent.locks.*;
10 –   import java.util.concurrent.atomic.*;
11 –   import java.util.*;
12 –   import java.io.*;
10 |
11 +   import java.util.AbstractQueue;
12 +   import java.util.Collection;
13 +   import java.util.ConcurrentModificationException;
14 +   import java.util.Iterator;
15 +   import java.util.NoSuchElementException;
16 +   import java.util.Queue;
17 +   import java.util.concurrent.locks.LockSupport;
18 |   /**
19 <    * An unbounded {@linkplain TransferQueue} based on linked nodes.
19 >    * An unbounded {@link TransferQueue} based on linked nodes.
20 |    * This queue orders elements FIFO (first-in-first-out) with respect
21 |    * to any given producer.  The <em>head</em> of the queue is that
22 |    * element that has been on the queue the longest time for some
23 |    * producer.  The <em>tail</em> of the queue is that element that has
24 |    * been on the queue the shortest time for some producer.
25 |    *
26 <    * <p>Beware that, unlike in most collections, the <tt>size</tt>
26 >    * <p>Beware that, unlike in most collections, the {@code size}
27 |    * method is <em>NOT</em> a constant-time operation. Because of the
28 |    * asynchronous nature of these queues, determining the current number
29 |    * of elements requires a traversal of the elements.
43 |    * <a href="{@docRoot}/../technotes/guides/collections/index.html">
44 |    * Java Collections Framework</a>.
45 |    *
46 <    * @since 1.5
46 >    * @since 1.7
47 |    * @author Doug Lea
48 |    * @param <E> the type of elements held in this collection
45 –    *
49 |    */
50 |   public class LinkedTransferQueue<E> extends AbstractQueue<E>
51 |       implements TransferQueue<E>, java.io.Serializable {
52 |       private static final long serialVersionUID = -3223113410248163686L;
53 |
54 |       /*
55 <        * This is still a work in prgress...
55 >        * *** Overview of Dual Queues with Slack ***
56 >        *
57 >        * Dual Queues, introduced by Scherer and Scott
58 >        * (http://www.cs.rice.edu/~wns1/papers/2004-DISC-DDS.pdf) are
59 >        * (linked) queues in which nodes may represent either data or
60 >        * requests.  When a thread tries to enqueue a data node, but
61 >        * encounters a request node, it instead "matches" and removes it;
62 >        * and vice versa for enqueuing requests. Blocking Dual Queues
63 >        * arrange that threads enqueuing unmatched requests block until
64 >        * other threads provide the match. Dual Synchronous Queues (see
65 >        * Scherer, Lea, & Scott
66 >        * http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf)
67 >        * additionally arrange that threads enqueuing unmatched data also
68 >        * block.  Dual Transfer Queues support all of these modes, as
69 >        * dictated by callers.
70 >        *
71 >        * A FIFO dual queue may be implemented using a variation of the
72 >        * Michael & Scott (M&S) lock-free queue algorithm
73 >        * (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf).
74 >        * It maintains two pointer fields, "head", pointing to a
75 >        * (matched) node that in turn points to the first actual
76 >        * (unmatched) queue node (or null if empty); and "tail" that
77 >        * points to the last node on the queue (or again null if
78 >        * empty). For example, here is a possible queue with four data
79 >        * elements:
80 >        *
81 >        *  head                tail
82 >        *    |                   |
83 >        *    v                   v
84 >        *    M -> U -> U -> U -> U
85 >        *
86 >        * The M&S queue algorithm is known to be prone to scalability and
87 >        * overhead limitations when maintaining (via CAS) these head and
88 >        * tail pointers. This has led to the development of
89 >        * contention-reducing variants such as elimination arrays (see
90 >        * Moir et al http://portal.acm.org/citation.cfm?id=1074013) and
91 >        * optimistic back pointers (see Ladan-Mozes & Shavit
92 >        * http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf).
93 >        * However, the nature of dual queues enables a simpler tactic for
94 >        * improving M&S-style implementations when dual-ness is needed.
95 >        *
96 >        * In a dual queue, each node must atomically maintain its match
97 >        * status. While there are other possible variants, we implement
98 >        * this here as: for a data-mode node, matching entails CASing an
99 >        * "item" field from a non-null data value to null upon match, and
100 >       * vice-versa for request nodes, CASing from null to a data
101 >       * value. (Note that the linearization properties of this style of
102 >       * queue are easy to verify -- elements are made available by
103 >       * linking, and unavailable by matching.) Compared to plain M&S
104 >       * queues, this property of dual queues requires one additional
105 >       * successful atomic operation per enq/deq pair. But it also
106 >       * enables lower cost variants of queue maintenance mechanics. (A
107 >       * variation of this idea applies even for non-dual queues that
108 >       * support deletion of interior elements, such as
109 >       * j.u.c.ConcurrentLinkedQueue.)
110 >       *
111 >       * Once a node is matched, its match status can never again
112 >       * change. We may thus arrange that the linked list of them
113 >       * contain a prefix of zero or more matched nodes, followed by a
114 >       * suffix of zero or more unmatched nodes. (Note that we allow
115 >       * both the prefix and suffix to be zero length, which in turn
116 >       * means that we do not use a dummy header.) If we were not
117 >       * concerned with either time or space efficiency, we could
118 >       * correctly perform enqueue and dequeue operations by traversing
119 >       * from a pointer to the initial node; CASing the item of the
120 >       * first unmatched node on match and CASing the next field of the
121 >       * trailing node on appends. (Plus some special-casing when
122 >       * initially empty). While this would be a terrible idea in
123 >       * itself, it does have the benefit of not requiring ANY atomic
124 >       * updates on head/tail fields.
125 >       *
126 >       * We introduce here an approach that lies between the extremes of
127 >       * never versus always updating queue (head and tail) pointers.
128 >       * This offers a tradeoff between sometimes requiring extra
129 >       * traversal steps to locate the first and/or last unmatched
130 >       * nodes, versus the reduced overhead and contention of fewer
131 >       * updates to queue pointers. For example, a possible snapshot of
132 >       * a queue is:
133 >       *
134 >       *  head           tail
135 >       *    |              |
136 >       *    v              v
137 >       *    M -> M -> U -> U -> U -> U
138 >       *
139 >       * The best value for this "slack" (the targeted maximum distance
140 >       * between the value of "head" and the first unmatched node, and
141 >       * similarly for "tail") is an empirical matter. We have found
142 >       * that using very small constants in the range of 1-3 work best
143 >       * over a range of platforms. Larger values introduce increasing
144 >       * costs of cache misses and risks of long traversal chains, while
145 >       * smaller values increase CAS contention and overhead.
146 >       *
147 >       * Dual queues with slack differ from plain M&S dual queues by
148 >       * virtue of only sometimes updating head or tail pointers when
149 >       * matching, appending, or even traversing nodes; in order to
150 >       * maintain a targeted slack. The idea of "sometimes" may be
151 >       * operationalized in several ways. The simplest is to use a
152 >       * per-operation counter incremented on each traversal step, and
153 >       * to try (via CAS) to update the associated queue pointer
154 >       * whenever the count exceeds a threshold. Another, that requires
155 >       * more overhead, is to use random number generators to update
156 >       * with a given probability per traversal step.
157 >       *
158 >       * In any strategy along these lines, because CASes updating
159 >       * fields may fail, the actual slack may exceed targeted
160 >       * slack. However, they may be retried at any time to maintain
161 >       * targets. Even when using very small slack values, this
162 >       * approach works well for dual queues because it allows all
163 >       * operations up to the point of matching or appending an item
164 >       * (hence potentially allowing progress by another thread) to be
165 >       * read-only, thus not introducing any further contention. As
166 >       * described below, we implement this by performing slack
167 >       * maintenance retries only after these points.
168 >       *
169 >       * As an accompaniment to such techniques, traversal overhead can
170 >       * be further reduced without increasing contention of head
171 >       * pointer updates: Threads may sometimes shortcut the "next" link
172 >       * path from the current "head" node to be closer to the currently
173 >       * known first unmatched node, and similarly for tail. Again, this
174 >       * may be triggered with using thresholds or randomization.
175 >       *
176 >       * These ideas must be further extended to avoid unbounded amounts
177 >       * of costly-to-reclaim garbage caused by the sequential "next"
178 >       * links of nodes starting at old forgotten head nodes: As first
179 >       * described in detail by Boehm
180 >       * (http://portal.acm.org/citation.cfm?doid=503272.503282) if a GC
181 >       * delays noticing that any arbitrarily old node has become
182 >       * garbage, all newer dead nodes will also be unreclaimed.
183 >       * (Similar issues arise in non-GC environments.) To cope with
184 >       * this in our implementation, upon CASing to advance the head
185 >       * pointer, we set the "next" link of the previous head to point
186 >       * only to itself; thus limiting the length of connected dead lists.
187 >       * (We also take similar care to wipe out possibly garbage
188 >       * retaining values held in other Node fields.) However, doing so
189 >       * adds some further complexity to traversal: If any "next"
190 >       * pointer links to itself, it indicates that the current thread
191 >       * has lagged behind a head-update, and so the traversal must
192 >       * continue from the "head".  Traversals trying to find the
193 >       * current tail starting from "tail" may also encounter
194 >       * self-links, in which case they also continue at "head".
195 >       *
196 >       * It is tempting in a slack-based scheme to not even use CAS for
197 >       * updates (similarly to Ladan-Mozes & Shavit). However, this
198 >       * cannot be done for head updates under the above link-forgetting
199 >       * mechanics because an update may leave head at a detached node.
200 >       * And while direct writes are possible for tail updates, they
201 >       * increase the risk of long retraversals, and hence long garbage
202 >       * chains, which can be much more costly than is worthwhile
203 >       * considering that the cost difference of performing a CAS vs
204 >       * write is smaller when they are not triggered on each operation
205 >       * (especially considering that writes and CASes equally require
206 >       * additional GC bookkeeping ("write barriers") that are sometimes
207 >       * more costly than the writes themselves because of contention).
208 >       *
209 >       * Removal of interior nodes (due to timed out or interrupted
210 >       * waits, or calls to remove(x) or Iterator.remove) can use a
211 >       * scheme roughly similar to that described in Scherer, Lea, and
212 >       * Scott's SynchronousQueue. Given a predecessor, we can unsplice
213 >       * any node except the (actual) tail of the queue. To avoid
214 >       * build-up of cancelled trailing nodes, upon a request to remove
215 >       * a trailing node, it is placed in field "cleanMe" to be
216 >       * unspliced upon the next call to unsplice any other node.
217 >       * Situations needing such mechanics are not common but do occur
218 >       * in practice; for example when an unbounded series of short
219 >       * timed calls to poll repeatedly time out but never otherwise
220 >       * fall off the list because of an untimed call to take at the
221 >       * front of the queue. Note that maintaining field cleanMe does
222 >       * not otherwise much impact garbage retention even if never
223 >       * cleared by some other call because the held node will
224 >       * eventually either directly or indirectly lead to a self-link
225 >       * once off the list.
226 >       *
227 >       * *** Overview of implementation ***
228 |       *
229 <       * This class extends the approach used in FIFO-mode
230 <       * SynchronousQueues. See the internal documentation, as well as
231 <       * the PPoPP 2006 paper "Scalable Synchronous Queues" by Scherer,
232 <       * Lea & Scott
233 <       * (http://www.cs.rice.edu/~wns1/papers/2006-PPoPP-SQ.pdf)
229 >       * We use a threshold-based approach to updates, with a slack
230 >       * threshold of two -- that is, we update head/tail when the
231 >       * current pointer appears to be two or more steps away from the
232 >       * first/last node. The slack value is hard-wired: a path greater
233 >       * than one is naturally implemented by checking equality of
234 >       * traversal pointers except when the list has only one element,
235 >       * in which case we keep slack threshold at one. Avoiding tracking
236 >       * explicit counts across method calls slightly simplifies an
237 >       * already-messy implementation. Using randomization would
238 >       * probably work better if there were a low-quality dirt-cheap
239 >       * per-thread one available, but even ThreadLocalRandom is too
240 >       * heavy for these purposes.
241 |       *
242 <       * The main extension is to provide different Wait modes
243 <       * for the main "xfer" method that puts or takes items.
244 <       * These don't impact the basic dual-queue logic, but instead
245 <       * control whether or how threads block upon insertion
246 <       * of request or data nodes into the dual queue.
242 >       * With such a small slack threshold value, it is rarely
243 >       * worthwhile to augment this with path short-circuiting; i.e.,
244 >       * unsplicing nodes between head and the first unmatched node, or
245 >       * similarly for tail, rather than advancing head or tail
246 >       * proper. However, it is used (in awaitMatch) immediately before
247 >       * a waiting thread starts to block, as a final bit of helping at
248 >       * a point when contention with others is extremely unlikely
249 >       * (since if other threads that could release it are operating,
250 >       * then the current thread wouldn't be blocking).
251 >       *
252 >       * We allow both the head and tail fields to be null before any
253 >       * nodes are enqueued; initializing upon first append. This
254 >       * simplifies some other logic, as well as providing more
255 >       * efficient explicit control paths instead of letting JVMs insert
256 >       * implicit NullPointerExceptions when they are null. While not
257 >       * currently fully implemented, we also leave open the possibility
258 >       * of re-nulling these fields when empty (which is complicated to
259 >       * arrange, for little benefit.)
260 >       *
261 >       * All enqueue/dequeue operations are handled by the single method
262 >       * "xfer" with parameters indicating whether to act as some form
263 >       * of offer, put, poll, take, or transfer (each possibly with
264 >       * timeout). The relative complexity of using one monolithic
265 >       * method outweighs the code bulk and maintenance problems of
266 >       * using separate methods for each case.
267 >       *
268 >       * Operation consists of up to three phases. The first is
269 >       * implemented within method xfer, the second in tryAppend, and
270 >       * the third in method awaitMatch.
271 >       *
272 >       * 1. Try to match an existing node
273 >       *
274 >       *    Starting at head, skip already-matched nodes until finding
275 >       *    an unmatched node of opposite mode, if one exists, in which
276 >       *    case matching it and returning, also if necessary updating
277 >       *    head to one past the matched node (or the node itself if the
278 >       *    list has no other unmatched nodes). If the CAS misses, then
279 >       *    a loop retries advancing head by two steps until either
280 >       *    success or the slack is at most two. By requiring that each
281 >       *    attempt advances head by two (if applicable), we ensure that
282 >       *    the slack does not grow without bound. Traversals also check
283 >       *    if the initial head is now off-list, in which case they
284 >       *    start at the new head.
285 >       *
286 >       *    If no candidates are found and the call was untimed
287 >       *    poll/offer, (argument "how" is NOW) return.
288 >       *
289 >       * 2. Try to append a new node (method tryAppend)
290 >       *
291 >       *    Starting at current tail pointer, find the actual last node
292 >       *    and try to append a new node (or if head was null, establish
293 >       *    the first node). Nodes can be appended only if their
294 >       *    predecessors are either already matched or are of the same
295 >       *    mode. If we detect otherwise, then a new node with opposite
296 >       *    mode must have been appended during traversal, so we must
297 >       *    restart at phase 1. The traversal and update steps are
298 >       *    otherwise similar to phase 1: Retrying upon CAS misses and
299 >       *    checking for staleness. In particular, if a self-link is
300 >       *    encountered, then we can safely jump to a node on the list
301 >       *    by continuing the traversal at current head.
302 >       *
303 >       *    On successful append, if the call was ASYNC, return.
304 >       *
305 >       * 3. Await match or cancellation (method awaitMatch)
306 >       *
307 >       *    Wait for another thread to match node; instead cancelling if
308 >       *    the current thread was interrupted or the wait timed out. On
309 >       *    multiprocessors, we use front-of-queue spinning: If a node
310 >       *    appears to be the first unmatched node in the queue, it
311 >       *    spins a bit before blocking. In either case, before blocking
312 >       *    it tries to unsplice any nodes between the current "head"
313 >       *    and the first unmatched node.
314 >       *
315 >       *    Front-of-queue spinning vastly improves performance of
316 >       *    heavily contended queues. And so long as it is relatively
317 >       *    brief and "quiet", spinning does not much impact performance
318 >       *    of less-contended queues.  During spins threads check their
319 >       *    interrupt status and generate a thread-local random number
320 >       *    to decide to occasionally perform a Thread.yield. While
321 >       *    yield has underdefined specs, we assume that it might help,
322 >       *    and will not hurt in limiting impact of spinning on busy
323 >       *    systems. We also use smaller (1/2) spins for nodes that are
324 >       *    not known to be front but whose predecessors have not
325 >       *    blocked -- these "chained" spins avoid artifacts of
326 >       *    front-of-queue rules which otherwise lead to alternating
327 >       *    nodes spinning vs blocking. Further, front threads that
328 >       *    represent phase changes (from data to request node or vice
329 >       *    versa) compared to their predecessors receive additional
330 >       *    chained spins, reflecting longer paths typically required to
331 >       *    unblock threads during phase changes.
332 |       */
333 |
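The match rule in the comment above (data nodes CAS item from value to null; request nodes CAS item from null to value) can be read in isolation. The following is a minimal sketch, not part of this patch, using a plain AtomicReference for the item rather than this file's Unsafe mechanics; all names here are illustrative:

    import java.util.concurrent.atomic.AtomicReference;

    // Illustrative only: one dual-queue node reduced to its match rule.
    final class DualNode<T> {
        final boolean isData;                // true: holds a value; false: a request
        final AtomicReference<Object> item;  // non-null iff isData, until matched

        DualNode(T value, boolean isData) {
            this.isData = isData;
            this.item = new AtomicReference<Object>(value);
        }

        // A taker matches a data node by CASing its item from the value to null.
        @SuppressWarnings("unchecked")
        T tryTakeFrom() {
            Object x = item.get();
            return (isData && x != null && item.compareAndSet(x, null)) ? (T) x : null;
        }

        // A putter matches a request node by CASing its item from null to the value.
        boolean tryGiveTo(T value) {
            return !isData && item.compareAndSet(null, value);
        }
    }

Exactly one thread can win either CAS, which is what makes the match the linearization point described above.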
334 <       // Wait modes for xfer method
335 <       static final int NOWAIT  = 0;
336 <       static final int TIMEOUT = 1;
70 <        static final int WAIT    = 2;
71 <
72 <        /** The number of CPUs, for spin control */
73 <        static final int NCPUS = Runtime.getRuntime().availableProcessors();
334 >       /** True if on multiprocessor */
335 >       private static final boolean MP =
336 >           Runtime.getRuntime().availableProcessors() > 1;
337 |
338 |       /**
339 <        * The number of times to spin before blocking in timed waits.
340 <        * The value is empirically derived -- it works well across a
341 <        * variety of processors and OSes. Empirically, the best value
342 <        * seems not to vary with number of CPUs (beyond 2) so is just
343 <        * a constant.
339 >        * The number of times to spin (with randomly interspersed calls
340 >        * to Thread.yield) on multiprocessor before blocking when a node
341 >        * is apparently the first waiter in the queue. See above for
342 >        * explanation. Must be a power of two. The value is empirically
343 >        * derived -- it works pretty well across a variety of processors,
344 >        * numbers of CPUs, and OSes.
345 |        */
346 <       static final int maxTimedSpins = (NCPUS < 2)? 0 : 32;
346 >       private static final int FRONT_SPINS   = 1 << 7;
347 |
348 |       /**
349 <        * The number of times to spin before blocking in untimed waits.
350 <        * This is greater than timed value because untimed waits spin
351 <        * faster since they don't need to check times on each spin.
349 >        * The number of times to spin before blocking when a node is
350 >        * preceded by another node that is apparently spinning. Also
351 >        * serves as an increment to FRONT_SPINS on phase changes, and as
352 >        * base average frequency for yielding during spins. Must be a
353 >        * power of two.
354 |        */
355 <       static final int maxUntimedSpins = maxTimedSpins * 16;
355 >       private static final int CHAINED_SPINS = FRONT_SPINS >>> 1;
356 |
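These two constants drive the spin policy sketched in the class comment: spend a spin budget with occasional random yields, then park. A minimal sketch of that shape, under the assumption of a simple shared flag and an unparking peer; this is not the actual awaitMatch logic, which appears later in this patch:

    import java.util.Random;
    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.locks.LockSupport;

    final class SpinThenPark {
        static final int FRONT_SPINS   = 1 << 7;
        static final int CHAINED_SPINS = FRONT_SPINS >>> 1;

        // Spend the spin budget first, yielding on average once per
        // CHAINED_SPINS iterations, then fall back to parking. The thread
        // that sets 'done' would also call LockSupport.unpark(waiter).
        static void await(AtomicBoolean done) {
            Random r = new Random();
            int spins = FRONT_SPINS;
            while (!done.get()) {
                if (spins > 0) {
                    --spins;
                    if (r.nextInt(CHAINED_SPINS) == 0)
                        Thread.yield();
                } else
                    LockSupport.park(); // loop recheck guards against spurious wakeups
            }
        }
    }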
357 |       /**
358 <        * The number of nanoseconds for which it is faster to spin
359 <        * rather than to use timed park. A rough estimate suffices.
358 >        * Queue nodes. Uses Object, not E, for items to allow forgetting
359 >        * them after use. Relies heavily on Unsafe mechanics to minimize
360 >        * unnecessary ordering constraints: Writes that intrinsically
361 >        * precede or follow CASes use simple relaxed forms. Other
362 >        * cleanups use releasing/lazy writes.
363 |        */
364 <       static final long spinForTimeoutThreshold = 1000L;
365 <
366 <       /**
367 <        * Node class for LinkedTransferQueue. Opportunistically subclasses from
368 <        * AtomicReference to represent item. Uses Object, not E, to allow
369 <        * setting item to "this" after use, to avoid garbage
370 <        * retention. Similarly, setting the next field to this is used as
371 <        * sentinel that node is off list.
372 <        */
373 <       static final class QNode extends AtomicReference<Object> {
374 <           volatile QNode next;
375 <           volatile Thread waiter; // to control park/unpark
376 <           final boolean isData;
377 <           QNode(Object item, boolean isData) {
378 <               super(item);
364 >       static final class Node<E> {
365 >           final boolean isData;   // false if this is a request node
366 >           volatile Object item;   // initially non-null if isData; CASed to match
367 >           volatile Node<E> next;
368 >           volatile Thread waiter; // null until waiting
369 >
370 >           // CAS methods for fields
371 >           final boolean casNext(Node<E> cmp, Node<E> val) {
372 >               return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
373 >           }
374 >
375 >           final boolean casItem(Object cmp, Object val) {
376 >               assert cmp == null || cmp.getClass() != Node.class;
377 >               return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
378 >           }
379 >
380 >           /**
381 >            * Creates a new node. Uses relaxed write because item can only
382 >            * be seen if followed by CAS.
383 >            */
384 >           Node(E item, boolean isData) {
385 >               UNSAFE.putObject(this, itemOffset, item); // relaxed write
386 |               this.isData = isData;
387 |           }
388 |
389 <           static final AtomicReferenceFieldUpdater<QNode, QNode>
390 <               nextUpdater = AtomicReferenceFieldUpdater.newUpdater
391 <               (QNode.class, QNode.class, "next");
392 <
393 <           boolean casNext(QNode cmp, QNode val) {
394 <               return nextUpdater.compareAndSet(this, cmp, val);
389 >           /**
390 >            * Links node to itself to avoid garbage retention. Called
391 >            * only after CASing head field, so uses relaxed write.
392 >            */
393 >           final void forgetNext() {
394 >               UNSAFE.putObject(this, nextOffset, this);
395 >           }
396 >
397 >           /**
398 >            * Sets item to self (using a releasing/lazy write) and waiter
399 >            * to null, to avoid garbage retention after extracting or
400 >            * cancelling.
401 >            */
402 >           final void forgetContents() {
403 >               UNSAFE.putOrderedObject(this, itemOffset, this);
404 >               UNSAFE.putOrderedObject(this, waiterOffset, null);
405 >           }
406 >
407 >           /**
408 >            * Returns true if this node has been matched, including the
409 >            * case of artificial matches due to cancellation.
410 >            */
411 >           final boolean isMatched() {
412 >               Object x = item;
413 >               return x == this || (x != null) != isData;
414 >           }
415 >
416 >           /**
417 >            * Returns true if a node with the given mode cannot be
418 >            * appended to this node because this node is unmatched and
419 >            * has opposite data mode.
420 >            */
421 >           final boolean cannotPrecede(boolean haveData) {
422 >               boolean d = isData;
423 >               Object x;
424 >               return d != haveData && (x = item) != this && (x != null) == d;
425 >           }
426 >
427 >           /**
428 >            * Tries to artificially match a data node -- used by remove.
429 >            */
430 >           final boolean tryMatchData() {
431 >               Object x = item;
432 >               if (x != null && x != this && casItem(x, null)) {
433 >                   LockSupport.unpark(waiter);
434 >                   return true;
435 >               }
436 >               return false;
437 |           }
120 –       }
438 |
439 <       /**
440 <        * Padded version of AtomicReference used for head, tail and
441 <        * cleanMe, to alleviate contention across threads CASing one vs
442 <        * the other.
443 <        */
444 <       static final class PaddedAtomicReference<T> extends AtomicReference<T> {
445 <           // enough padding for 64bytes with 4byte refs
446 <           Object p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pa, pb, pc, pd, pe;
447 <           PaddedAtomicReference(T r) { super(r); }
439 >           // Unsafe mechanics
440 >           private static final sun.misc.Unsafe UNSAFE = getUnsafe();
441 >           private static final long nextOffset =
442 >               objectFieldOffset(UNSAFE, "next", Node.class);
443 >           private static final long itemOffset =
444 >               objectFieldOffset(UNSAFE, "item", Node.class);
445 >           private static final long waiterOffset =
446 >               objectFieldOffset(UNSAFE, "waiter", Node.class);
447 >
448 >           private static final long serialVersionUID = -3375979862319811754L;
449 |       }
450 |
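The Unsafe calls above have standard portable counterparts: compareAndSwapObject corresponds to AtomicReferenceFieldUpdater.compareAndSet, and putOrderedObject to its lazySet. A hedged sketch of a node written that way follows; it is slower (updater overhead is part of why the QNode version above is being dropped), but it makes the intended semantics easy to read. Names are illustrative, not this file's code:

    import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

    // Illustrative only: same CAS semantics as Node, without sun.misc.Unsafe.
    final class UpdaterNode {
        final boolean isData;
        volatile Object item;
        volatile UpdaterNode next;
        volatile Thread waiter;

        private static final AtomicReferenceFieldUpdater<UpdaterNode, Object> ITEM =
            AtomicReferenceFieldUpdater.newUpdater(UpdaterNode.class, Object.class, "item");
        private static final AtomicReferenceFieldUpdater<UpdaterNode, UpdaterNode> NEXT =
            AtomicReferenceFieldUpdater.newUpdater(UpdaterNode.class, UpdaterNode.class, "next");

        UpdaterNode(Object item, boolean isData) {
            this.item = item;      // plain volatile write; no relaxed form available here
            this.isData = isData;
        }

        boolean casItem(Object cmp, Object val) { return ITEM.compareAndSet(this, cmp, val); }
        boolean casNext(UpdaterNode cmp, UpdaterNode val) { return NEXT.compareAndSet(this, cmp, val); }

        void forgetNext()     { next = this; }             // self-link marks node off-list
        void forgetContents() { ITEM.lazySet(this, this); waiter = null; }
    }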
451 +       /** head of the queue; null until first enqueue */
452 +       transient volatile Node<E> head;
453 |
454 <       private final QNode dummy = new QNode(null, false);
455 <       private final PaddedAtomicReference<QNode> head =
136 <           new PaddedAtomicReference<QNode>(dummy);
137 <       private final PaddedAtomicReference<QNode> tail =
138 <           new PaddedAtomicReference<QNode>(dummy);
454 >       /** predecessor of dangling unspliceable node */
455 >       private transient volatile Node<E> cleanMe; // decl here reduces contention
456 |
457 <       /**
458 <        * Reference to a cancelled node that might not yet have been
142 <        * unlinked from queue because it was the last inserted node
143 <        * when it cancelled.
144 <        */
145 <       private final PaddedAtomicReference<QNode> cleanMe =
146 <           new PaddedAtomicReference<QNode>(null);
457 >       /** tail of the queue; null until first append */
458 >       private transient volatile Node<E> tail;
459 |
460 <       /**
461 <        * Tries to cas nh as new head; if successful, unlink
462 <        * old head's next node to avoid garbage retention.
460 >       // CAS methods for fields
461 >       private boolean casTail(Node<E> cmp, Node<E> val) {
462 >           return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val);
463 >       }
464 >
465 >       private boolean casHead(Node<E> cmp, Node<E> val) {
466 >           return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val);
467 >       }
468 >
469 >       private boolean casCleanMe(Node<E> cmp, Node<E> val) {
470 >           return UNSAFE.compareAndSwapObject(this, cleanMeOffset, cmp, val);
471 >       }
472 >
473 >       /*
474 >        * Possible values for "how" argument in xfer method. Beware that
475 >        * the order of assigned numerical values matters.
476 |        */
477 <       private boolean advanceHead(QNode h, QNode nh) {
478 <           if (h == head.get() && head.compareAndSet(h, nh)) {
479 <               h.next = h; // forget old next
480 <               return true;
481 <           }
482 <           return false;
477 >       private static final int NOW     = 0; // for untimed poll, tryTransfer
478 >       private static final int ASYNC   = 1; // for offer, put, add
479 >       private static final int SYNC    = 2; // for transfer, take
480 >       private static final int TIMEOUT = 3; // for timed poll, tryTransfer
481 >
482 >       @SuppressWarnings("unchecked")
483 >       static <E> E cast(Object item) {
484 >           assert item == null || item.getClass() != Node.class;
485 >           return (E) item;
486 |       }
487 <
487 >
488 |       /**
489 <        * Puts or takes an item. Used for most queue operations (except
490 <        * poll() and tryTransfer())
491 <        * @param e the item or if null, signfies that this is a take
492 <        * @param mode the wait mode: NOWAIT, TIMEOUT, WAIT
489 >        * Implements all queuing methods. See above for explanation.
490 >        *
491 >        * @param e the item or null for take
492 >        * @param haveData true if this is a put, else a take
493 >        * @param how NOW, ASYNC, SYNC, or TIMEOUT
494 |        * @param nanos timeout in nanosecs, used only if mode is TIMEOUT
495 <        * @return an item, or null on failure
495 >        * @return an item if matched, else e
496 >        * @throws NullPointerException if haveData mode but e is null
497 |        */
498 <       private Object xfer(Object e, int mode, long nanos) {
499 <           boolean isData = (e != null);
500 <           QNode s = null;
501 <           final PaddedAtomicReference<QNode> head = this.head;
172 <           final PaddedAtomicReference<QNode> tail = this.tail;
498 >       private E xfer(E e, boolean haveData, int how, long nanos) {
499 >           if (haveData && (e == null))
500 >               throw new NullPointerException();
501 >           Node<E> s = null;                     // the node to append, if needed
502 |
503 <           for (;;) {
175 <               QNode t = tail.get();
176 <               QNode h = head.get();
503 >           retry: for (;;) {                     // restart on append race
504 |
505 <               if (t != null && (t == h || t.isData == isData)) {
506 <                   if (s == null)
507 <                       s = new QNode(e, isData);
508 <                   QNode last = t.next;
509 <                   if (last != null) {
510 <                       if (t == tail.get())
511 <                           tail.compareAndSet(t, last);
512 <                   }
513 <                   else if (t.casNext(null, s)) {
514 <                       tail.compareAndSet(t, s);
515 <                       return awaitFulfill(t, s, e, mode, nanos);
516 <                   }
517 <               }
518 <
519 <               else if (h != null) {
520 <                   QNode first = h.next;
521 <                   if (t == tail.get() && first != null &&
522 <                       advanceHead(h, first)) {
523 <                       Object x = first.get();
524 <                       if (x != first && first.compareAndSet(x, e)) {
525 <                           LockSupport.unpark(first.waiter);
526 <                           return isData? e : x;
505 >               for (Node<E> h = head, p = h; p != null;) {
506 >                   // find & match first node
507 >                   boolean isData = p.isData;
508 >                   Object item = p.item;
509 >                   if (item != p && (item != null) == isData) { // unmatched
510 >                       if (isData == haveData)   // can't match
511 >                           break;
512 >                       if (p.casItem(item, e)) { // match
513 >                           for (Node<E> q = p; q != h;) {
514 >                               Node<E> n = q.next;   // update head by 2
515 >                               if (n != null)        // unless singleton
516 >                                   q = n;
517 >                               if (head == h && casHead(h, q)) {
518 >                                   h.forgetNext();
519 >                                   break;
520 >                               }                     // advance and retry
521 >                               if ((h = head) == null ||
522 >                                   (q = h.next) == null || !q.isMatched())
523 >                                   break;            // unless slack < 2
524 >                           }
525 >                           LockSupport.unpark(p.waiter);
526 >                           return this.<E>cast(item);
527 |                       }
528 |                   }
529 +                   Node<E> n = p.next;
530 +                   p = (p != n) ? n : (h = head); // Use head if p offlist
531 +               }
532 +
533 +               if (how >= ASYNC) {               // No matches available
534 +                   if (s == null)
535 +                       s = new Node<E>(e, haveData);
536 +                   Node<E> pred = tryAppend(s, haveData);
537 +                   if (pred == null)
538 +                       continue retry;           // lost race vs opposite mode
539 +                   if (how >= SYNC)
540 +                       return awaitMatch(s, pred, e, how, nanos);
541 |               }
542 +               return e; // not waiting
543 |           }
544 |       }
545 |
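The four "how" values map one-to-one onto the public API implemented further below. A small usage sketch, assuming only that the patched jsr166y.LinkedTransferQueue is on the classpath:

    import java.util.concurrent.TimeUnit;
    import jsr166y.LinkedTransferQueue;

    public class XferModesDemo {
        public static void main(String[] args) throws InterruptedException {
            final LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();

            q.offer("a");                        // ASYNC: append and return at once
            String a = q.poll();                 // NOW: match or fail, never waits

            Thread taker = new Thread(new Runnable() {
                public void run() {
                    try {
                        System.out.println("took " + q.take()); // SYNC: blocks until matched
                    } catch (InterruptedException ignored) {}
                }
            });
            taker.start();
            q.transfer("b");                     // SYNC: returns once the taker has it

            String none = q.poll(10, TimeUnit.MILLISECONDS); // TIMEOUT: null on expiry
            System.out.println(a + " " + none);  // a null
            taker.join();
        }
    }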
206 –
546 |       /**
547 <        * Version of xfer for poll() and tryTransfer, which
548 <        * simpifies control paths both here and in xfer
547 >        * Tries to append node s as tail.
548 >        *
549 >        * @param s the node to append
550 >        * @param haveData true if appending in data mode
551 >        * @return null on failure due to losing race with append in
552 >        * different mode, else s's predecessor, or s itself if no
553 >        * predecessor
554 |        */
555 <       private Object fulfill(Object e) {
556 <           boolean isData = (e != null);
557 <           final PaddedAtomicReference<QNode> head = this.head;
558 <           final PaddedAtomicReference<QNode> tail = this.tail;
559 <
560 <           for (;;) {
217 <               QNode t = tail.get();
218 <               QNode h = head.get();
219 <
220 <               if (t != null && (t == h || t.isData == isData)) {
221 <                   QNode last = t.next;
222 <                   if (t == tail.get()) {
223 <                       if (last != null)
224 <                           tail.compareAndSet(t, last);
225 <                       else
226 <                           return null;
227 <                   }
555 >       private Node<E> tryAppend(Node<E> s, boolean haveData) {
556 >           for (Node<E> t = tail, p = t;;) { // move p to last node and append
557 >               Node<E> n, u;                 // temps for reads of next & tail
558 >               if (p == null && (p = head) == null) {
559 >                   if (casHead(null, s))
560 >                       return s;             // initialize
561 |               }
562 <               else if (h != null) {
563 <                   QNode first = h.next;
564 <                   if (t == tail.get() &&
565 <                       first != null &&
566 <                       advanceHead(h, first)) {
567 <                       Object x = first.get();
568 <                       if (x != first && first.compareAndSet(x, e)) {
569 <                           LockSupport.unpark(first.waiter);
570 <                           return isData? e : x;
571 <                       }
562 >               else if (p.cannotPrecede(haveData))
563 >                   return null;                  // lost race vs opposite mode
564 >               else if ((n = p.next) != null)    // not last; keep traversing
565 >                   p = p != t && t != (u = tail) ? (t = u) : // stale tail
566 >                       (p != n) ? n : null;      // restart if off list
567 >               else if (!p.casNext(null, s))
568 >                   p = p.next;                   // re-read on CAS failure
569 >               else {
570 >                   if (p != t) {                 // update if slack now >= 2
571 >                       while ((tail != t || !casTail(t, s)) &&
572 >                              (t = tail)   != null &&
573 >                              (s = t.next) != null && // advance and retry
574 >                              (s = s.next) != null && s != t);
575 |                   }
576 +                   return p;
577 |               }
578 |           }
579 |       }
580 |
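Phase 2 above is a slack-tolerant variant of the classic Michael & Scott tail append. Stripped of the mode checks and slack maintenance, the core append step looks like the following sketch; names are illustrative and a dummy node sidesteps the empty-queue case this file instead handles via null head/tail:

    import java.util.concurrent.atomic.AtomicReference;

    final class MSAppend<T> {
        static final class Node<T> {
            final T value;
            final AtomicReference<Node<T>> next = new AtomicReference<Node<T>>(null);
            Node(T value) { this.value = value; }
        }

        final AtomicReference<Node<T>> tail;

        MSAppend(Node<T> dummy) { tail = new AtomicReference<Node<T>>(dummy); }

        void append(Node<T> s) {
            for (;;) {
                Node<T> t = tail.get();
                Node<T> last = t;                  // walk past a lagging tail
                for (Node<T> n; (n = last.next.get()) != null; )
                    last = n;
                if (last.next.compareAndSet(null, s)) {
                    tail.compareAndSet(t, s);      // a failed swing is fine; others help
                    return;
                }                                  // CAS miss: someone else appended; retry
            }
        }
    }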
581 |       /**
582 <        * Spins/blocks until node s is fulfilled or caller gives up,
246 <        * depending on wait mode.
582 >        * Spins/yields/blocks until node s is matched or caller gives up.
583 |        *
248 –        * @param pred the predecessor of waiting node
584 |        * @param s the waiting node
585 +        * @param pred the predecessor of s, or s itself if it has no
586 +        * predecessor, or null if unknown (the null case does not occur
587 +        * in any current calls but may in possible future extensions)
588 |        * @param e the comparison value for checking match
589 <        * @param mode mode
589 >        * @param how either SYNC or TIMEOUT
590 |        * @param nanos timeout value
591 <        * @return matched item, or s if cancelled
591 >        * @return matched item, or e if unmatched on interrupt or timeout
592 |        */
593 <       private Object awaitFulfill(QNode pred, QNode s, Object e,
594 <                                   int mode, long nanos) {
257 <           if (mode == NOWAIT)
258 <               return null;
259 <
260 <           long lastTime = (mode == TIMEOUT)? System.nanoTime() : 0;
593 >       private E awaitMatch(Node<E> s, Node<E> pred, E e, int how, long nanos) {
594 >           long lastTime = (how == TIMEOUT) ? System.nanoTime() : 0L;
595 |           Thread w = Thread.currentThread();
596 <           int spins = -1; // set to desired spin count below
596 >           int spins = -1; // initialized after first item and cancel checks
597 >           ThreadLocalRandom randomYields = null; // bound if needed
598 >
599 |           for (;;) {
600 <               if (w.isInterrupted())
601 <                   s.compareAndSet(e, s);
602 <               Object x = s.get();
603 <               if (x != e) {                 // Node was matched or cancelled
604 <                   advanceHead(pred, s);     // unlink if head
605 <                   if (x == s)               // was cancelled
606 <                       return clean(pred, s);
607 <                   else if (x != null) {
608 <                       s.set(s);             // avoid garbage retention
609 <                       return x;
274 <                   }
275 <                   else
276 <                       return e;
600 >               Object item = s.item;
601 >               if (item != e) {                  // matched
602 >                   assert item != s;
603 >                   s.forgetContents();           // avoid garbage
604 >                   return this.<E>cast(item);
605 >               }
606 >               if ((w.isInterrupted() || (how == TIMEOUT && nanos <= 0)) &&
607 >                       s.casItem(e, s)) {        // cancel
608 >                   unsplice(pred, s);
609 >                   return e;
610 |               }
611 |
612 <               if (mode == TIMEOUT) {
612 >               if (spins < 0) {                  // establish spins at/near front
613 >                   if ((spins = spinsFor(pred, s.isData)) > 0)
614 >                       randomYields = ThreadLocalRandom.current();
615 >               }
616 >               else if (spins > 0) {             // spin
617 >                   if (--spins == 0)
618 >                       shortenHeadPath();        // reduce slack before blocking
619 >                   else if (randomYields.nextInt(CHAINED_SPINS) == 0)
620 >                       Thread.yield();           // occasionally yield
621 >               }
622 >               else if (s.waiter == null) {
623 >                   s.waiter = w;                 // request unpark then recheck
624 >               }
625 >               else if (how == TIMEOUT) {
626 |                   long now = System.nanoTime();
627 <                   nanos -= now - lastTime;
627 >                   if ((nanos -= now - lastTime) > 0)
628 >                       LockSupport.parkNanos(this, nanos);
629 |                   lastTime = now;
283 –                   if (nanos <= 0) {
284 –                       s.compareAndSet(e, s); // try to cancel
285 –                       continue;
286 –                   }
630 |               }
631 <               if (spins < 0) {
632 <                   QNode h = head.get(); // only spin if at head
290 <                   spins = ((h != null && h.next == s) ?
291 <                            (mode == TIMEOUT?
292 <                             maxTimedSpins : maxUntimedSpins) : 0);
293 <               }
294 <               if (spins > 0)
295 <                   --spins;
296 <               else if (s.waiter == null)
297 <                   s.waiter = w;
298 <               else if (mode != TIMEOUT) {
299 <                   // LockSupport.park(this);
300 <                   LockSupport.park(); // allows run on java5
631 >               else {
632 >                   LockSupport.park(this);
633 |                   s.waiter = null;
634 <                   spins = -1;
634 >                   spins = -1;                   // spin if front upon wakeup
635 |               }
636 <               else if (nanos > spinForTimeoutThreshold) {
637 <                   // LockSupport.parkNanos(this, nanos);
638 <                   LockSupport.parkNanos(nanos);
639 <                   s.waiter = null;
640 <                   spins = -1;
636 >           }
637 >       }
638 >
639 >       /**
640 >        * Returns spin/yield value for a node with given predecessor and
641 >        * data mode. See above for explanation.
642 >        */
643 >       private static int spinsFor(Node<?> pred, boolean haveData) {
644 >           if (MP && pred != null) {
645 >               if (pred.isData != haveData)      // phase change
646 >                   return FRONT_SPINS + CHAINED_SPINS;
647 >               if (pred.isMatched())             // probably at front
648 >                   return FRONT_SPINS;
649 >               if (pred.waiter == null)          // pred apparently spinning
650 >                   return CHAINED_SPINS;
651 >           }
652 >           return 0;
653 >       }
654 >
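The TIMEOUT branch of awaitMatch keeps its deadline by subtracting elapsed time on every wakeup rather than trusting parkNanos to sleep the full interval. That bookkeeping shape, pulled out into a minimal sketch with an assumed completion flag:

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.locks.LockSupport;

    final class TimedWait {
        // Recompute the remaining budget on each wakeup; park only while
        // it stays positive. parkNanos may return early or spuriously,
        // which is why the loop re-reads the condition every iteration.
        static boolean awaitNanos(AtomicBoolean done, long nanos) {
            long lastTime = System.nanoTime();
            while (!done.get()) {
                if (nanos <= 0)
                    return false;              // timed out; a caller would cancel here
                LockSupport.parkNanos(nanos);
                long now = System.nanoTime();
                nanos -= now - lastTime;
                lastTime = now;
            }
            return true;
        }
    }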
655 >       /**
656 >        * Tries (once) to unsplice nodes between head and first unmatched
657 >        * or trailing node; failing on contention.
658 >        */
659 >       private void shortenHeadPath() {
660 >           Node<E> h, hn, p, q;
661 >           if ((p = h = head) != null && h.isMatched() &&
662 >               (q = hn = h.next) != null) {
663 >               Node<E> n;
664 >               while ((n = q.next) != q) {
665 >                   if (n == null || !q.isMatched()) {
666 >                       if (hn != q && h.next == hn)
667 >                           h.casNext(hn, q);
668 >                       break;
669 >                   }
670 >                   p = q;
671 >                   q = n;
672 |               }
673 |           }
674 |       }
675 |
676 +       /* -------------- Traversal methods -------------- */
677 +
678 |       /**
679 <        * Gets rid of cancelled node s with original predecessor pred.
680 <        * @return null (to simplify use by callers)
679 >        * Returns the first unmatched node of the given mode, or null if
680 >        * none. Used by methods isEmpty, hasWaitingConsumer.
681 |        */
682 <       private Object clean(QNode pred, QNode s) {
683 <           Thread w = s.waiter;
684 <           if (w != null) {    // Wake up thread
685 <               s.waiter = null;
686 <               if (w != Thread.currentThread())
687 <                   LockSupport.unpark(w);
682 >       private Node<E> firstOfMode(boolean data) {
683 >           for (Node<E> p = head; p != null; ) {
684 >               if (!p.isMatched())
685 >                   return (p.isData == data) ? p : null;
686 >               Node<E> n = p.next;
687 >               p = (n != p) ? n : head;
688 |           }
689 <
690 <           for (;;) {
691 <               if (pred.next != s) // already cleaned
692 <                   return null;
693 <               QNode h = head.get();
694 <               QNode hn = h.next;   // Absorb cancelled first node as head
695 <               if (hn != null && hn.next == hn) {
696 <                   advanceHead(h, hn);
697 <                   continue;
698 <               }
699 <               QNode t = tail.get();      // Ensure consistent read for tail
700 <               if (t == h)
701 <                   return null;
702 <               QNode tn = t.next;
703 <               if (t != tail.get())
704 <                   continue;
705 <               if (tn != null) {          // Help advance tail
706 <                   tail.compareAndSet(t, tn);
707 <                   continue;
708 <               }
709 <               if (s != t) {              // If not tail, try to unsplice
710 <                   QNode sn = s.next;
711 <                   if (sn == s || pred.casNext(s, sn))
712 <                       return null;
713 <               }
714 <               QNode dp = cleanMe.get();
715 <               if (dp != null) {    // Try unlinking previous cancelled node
716 <                   QNode d = dp.next;
717 <                   QNode dn;
718 <                   if (d == null ||               // d is gone or
719 <                       d == dp ||                 // d is off list or
720 <                       d.get() != d ||            // d not cancelled or
721 <                       (d != t &&                 // d not tail and
722 <                        (dn = d.next) != null &&  //   has successor
723 <                        dn != d &&                //   that is on list
724 <                        dp.casNext(d, dn)))       // d unspliced
725 <                       cleanMe.compareAndSet(dp, null);
726 <                   if (dp == pred)
727 <                       return null;      // s is already saved node
728 <               }
729 <               else if (cleanMe.compareAndSet(null, pred))
730 <                   return null;          // Postpone cleaning s
689 >           return null;
690 >       }
691 >
692 >       /**
693 >        * Returns the item in the first unmatched node with isData; or
694 >        * null if none. Used by peek.
695 >        */
696 >       private E firstDataItem() {
697 >           for (Node<E> p = head; p != null; ) {
698 >               boolean isData = p.isData;
699 >               Object item = p.item;
700 >               if (item != p && (item != null) == isData)
701 >                   return isData ? this.<E>cast(item) : null;
702 >               Node<E> n = p.next;
703 >               p = (n != p) ? n : head;
704 >           }
705 >           return null;
706 >       }
707 >
708 >       /**
709 >        * Traverses and counts unmatched nodes of the given mode.
710 >        * Used by methods size and getWaitingConsumerCount.
711 >        */
712 >       private int countOfMode(boolean data) {
713 >           int count = 0;
714 >           for (Node<E> p = head; p != null; ) {
715 >               if (!p.isMatched()) {
716 >                   if (p.isData != data)
717 >                       return 0;
718 >                   if (++count == Integer.MAX_VALUE) // saturated
719 >                       break;
720 >               }
721 >               Node<E> n = p.next;
722 >               if (n != p)
723 >                   p = n;
724 >               else {
725 >                   count = 0;
726 >                   p = head;
727 >               }
728 >           }
729 >           return count;
730 >       }
731 >
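countOfMode is why the class javadoc warns that size is not constant-time: the count is taken by walking the whole list, restarting from head if the traversal falls off it. Callers that only need emptiness should prefer the cheaper checks; a hedged usage note, assuming the patched class on the classpath:

    import jsr166y.LinkedTransferQueue;

    class SizeCost {
        static void report(LinkedTransferQueue<Integer> q) {
            // size() walks the list -- O(n), and possibly stale on return:
            System.out.println("approximate size: " + q.size());
            // isEmpty() only needs to find one unmatched node (typically a
            // short walk given the slack bound), so prefer it, or
            // peek() != null, when emptiness is all that matters:
            System.out.println("empty: " + q.isEmpty());
        }
    }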
732 >       final class Itr implements Iterator<E> {
733 >           private Node<E> nextNode;   // next node to return item for
734 >           private E nextItem;         // the corresponding item
735 >           private Node<E> lastRet;    // last returned node, to support remove
736 >
737 >           /**
738 >            * Moves to next node after prev, or first node if prev null.
739 >            */
740 >           private void advance(Node<E> prev) {
741 >               lastRet = prev;
742 >               Node<E> p;
743 >               if (prev == null || (p = prev.next) == prev)
744 >                   p = head;
745 >               while (p != null) {
746 >                   Object item = p.item;
747 >                   if (p.isData) {
748 >                       if (item != null && item != p) {
749 >                           nextItem = LinkedTransferQueue.this.<E>cast(item);
750 >                           nextNode = p;
751 >                           return;
752 >                       }
753 >                   }
754 >                   else if (item == null)
755 >                       break;
756 >                   Node<E> n = p.next;
757 >                   p = (n != p) ? n : head;
758 >               }
759 >               nextNode = null;
760 >           }
761 >
762 >           Itr() {
763 >               advance(null);
764 >           }
765 >
766 >           public final boolean hasNext() {
767 >               return nextNode != null;
768 >           }
769 >
770 >           public final E next() {
771 >               Node<E> p = nextNode;
772 >               if (p == null) throw new NoSuchElementException();
773 >               E e = nextItem;
774 >               advance(p);
775 >               return e;
776 >           }
777 >
778 >           public final void remove() {
779 >               Node<E> p = lastRet;
780 >               if (p == null) throw new IllegalStateException();
781 >               lastRet = null;
782 >               findAndRemoveNode(p);
783 >           }
784 >       }
785 >
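Because Itr pre-reads nextItem in advance(), the iterator is weakly consistent: it never throws ConcurrentModificationException, and its remove() routes through findAndRemoveNode/unsplice below. A short usage sketch, assuming the patched class on the classpath:

    import java.util.Iterator;
    import jsr166y.LinkedTransferQueue;

    class IterDemo {
        public static void main(String[] args) {
            LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
            q.offer("a"); q.offer("b"); q.offer("c");
            // Safe to iterate while other threads offer/poll concurrently.
            for (Iterator<String> it = q.iterator(); it.hasNext(); ) {
                if ("b".equals(it.next()))
                    it.remove();            // unsplices the matched node
            }
            System.out.println(q);          // [a, c]
        }
    }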
786 >       /* -------------- Removal methods -------------- */
787 >
788 >       /**
789 >        * Unsplices (now or later) the given deleted/cancelled node with
790 >        * the given predecessor.
791 >        *
792 >        * @param pred predecessor of node to be unspliced
793 >        * @param s the node to be unspliced
794 >        */
795 >       private void unsplice(Node<E> pred, Node<E> s) {
796 >           s.forgetContents(); // clear unneeded fields
797 >           /*
798 >            * At any given time, exactly one node on list cannot be
799 >            * unlinked -- the last inserted node. To accommodate this, if
800 >            * we cannot unlink s, we save its predecessor as "cleanMe",
801 >            * processing the previously saved version first. Because only
802 >            * one node in the list can have a null next, at least one of
803 >            * node s or the node previously saved can always be
804 >            * processed, so this always terminates.
805 >            */
806 >           if (pred != null && pred != s) {
807 >               while (pred.next == s) {
808 >                   Node<E> oldpred = (cleanMe == null) ? null : reclean();
809 >                   Node<E> n = s.next;
810 >                   if (n != null) {
811 >                       if (n != s)
812 >                           pred.casNext(s, n);
813 >                       break;
814 >                   }
815 >                   if (oldpred == pred ||          // Already saved
816 >                       (oldpred == null && casCleanMe(null, pred)))
817 >                       break;                      // Postpone cleaning
818 >               }
819 >           }
820 >       }
821 >
822 >       /**
823 >        * Tries to unsplice the deleted/cancelled node held in cleanMe
824 >        * that was previously uncleanable because it was at tail.
825 >        *
826 >        * @return current cleanMe node (or null)
827 >        */
828 >       private Node<E> reclean() {
829 >           /*
830 >            * cleanMe is, or at one time was, predecessor of a cancelled
831 >            * node s that was the tail so could not be unspliced.  If it
832 >            * is no longer the tail, try to unsplice if necessary and
833 >            * make cleanMe slot available.  This differs from similar
834 >            * code in unsplice() because we must check that pred still
835 >            * points to a matched node that can be unspliced -- if not,
836 >            * we can (must) clear cleanMe without unsplicing.  This can
837 >            * loop only due to contention.
838 >            */
839 >           Node<E> pred;
840 >           while ((pred = cleanMe) != null) {
841 >               Node<E> s = pred.next;
842 >               Node<E> n;
843 >               if (s == null || s == pred || !s.isMatched())
844 >                   casCleanMe(pred, null); // already gone
845 >               else if ((n = s.next) != null) {
846 >                   if (n != s)
847 >                       pred.casNext(s, n);
848 >                   casCleanMe(pred, null);
849 >               }
850 >               else
851 >                   break;
852 |           }
853 +           return pred;
854 |       }
855 <
855 >
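Away from the trailing-node special case that cleanMe exists for, unsplicing reduces to one next-field CAS on the predecessor. That single step, isolated into a sketch (no cleanMe handling; illustrative names, not this file's code):

    import java.util.concurrent.atomic.AtomicReference;

    final class UnspliceStep {
        static final class Node {
            final AtomicReference<Node> next = new AtomicReference<Node>(null);
        }

        // Remove s from between pred and s's successor. A failed CAS is
        // left alone: matched nodes are skipped by traversals anyway, so
        // a missed unsplice only delays reclamation, never breaks the list.
        static void unsplice(Node pred, Node s) {
            Node n = s.next.get();
            if (n != null && n != s)     // the trailing node cannot be unlinked here
                pred.next.compareAndSet(s, n);
        }
    }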
856 |       /**
857 <        * Creates an initially empty <tt>LinkedTransferQueue</tt>.
857 >        * Main implementation of Iterator.remove(). Find
858 >        * and unsplice the given node.
859 >        */
860 >       final void findAndRemoveNode(Node<E> s) {
861 >           if (s.tryMatchData()) {
862 >               Node<E> pred = null;
863 >               Node<E> p = head;
864 >               while (p != null) {
865 >                   if (p == s) {
866 >                       unsplice(pred, p);
867 >                       break;
868 >                   }
869 >                   if (!p.isData && !p.isMatched())
870 >                       break;
871 >                   pred = p;
872 >                   if ((p = p.next) == pred) { // stale
873 >                       pred = null;
874 >                       p = head;
875 >                   }
876 >               }
877 >           }
878 >       }
879 >
880 >       /**
881 >        * Main implementation of remove(Object).
882 >        */
883 >       private boolean findAndRemove(Object e) {
884 >           if (e != null) {
885 >               Node<E> pred = null;
886 >               Node<E> p = head;
887 >               while (p != null) {
888 >                   Object item = p.item;
889 >                   if (p.isData) {
890 >                       if (item != null && item != p && e.equals(item) &&
891 >                           p.tryMatchData()) {
892 >                           unsplice(pred, p);
893 >                           return true;
894 >                       }
895 >                   }
896 >                   else if (item == null)
897 >                       break;
898 >                   pred = p;
899 >                   if ((p = p.next) == pred) {
900 >                       pred = null;
901 >                       p = head;
902 >                   }
903 >               }
904 >           }
905 >           return false;
906 >       }
907 >
908 >
909 >       /**
910 >        * Creates an initially empty {@code LinkedTransferQueue}.
911 |        */
912 |       public LinkedTransferQueue() {
913 |       }
914 |
915 |       /**
916 <        * Creates a <tt>LinkedTransferQueue</tt>
916 >        * Creates a {@code LinkedTransferQueue}
917 |        * initially containing the elements of the given collection,
918 |        * added in traversal order of the collection's iterator.
919 +        *
920 |        * @param c the collection of elements to initially contain
921 |        * @throws NullPointerException if the specified collection or any
922 |        *         of its elements are null
923 |        */
924 |       public LinkedTransferQueue(Collection<? extends E> c) {
925 +           this();
926 |           addAll(c);
927 |       }
928 |
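The collection constructor simply delegates to addAll, so the queue starts in the collection's iteration order. A tiny usage sketch, assuming the patched class on the classpath:

    import java.util.Arrays;
    import jsr166y.LinkedTransferQueue;

    class CtorDemo {
        public static void main(String[] args) {
            LinkedTransferQueue<Integer> q =
                new LinkedTransferQueue<Integer>(Arrays.asList(1, 2, 3));
            System.out.println(q.poll()); // 1 -- FIFO in the collection's iteration order
        }
    }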
929 <       public void put(E e) throws InterruptedException {
930 <           if (e == null) throw new NullPointerException();
931 <           if (Thread.interrupted()) throw new InterruptedException();
932 <           xfer(e, NOWAIT, 0);
929 >       /**
930 >        * Inserts the specified element at the tail of this queue.
931 >        * As the queue is unbounded, this method will never block.
932 >        *
933 >        * @throws NullPointerException if the specified element is null
934 >        */
935 >       public void put(E e) {
936 >           xfer(e, true, ASYNC, 0);
937 |       }
938 |
939 <       public boolean offer(E e, long timeout, TimeUnit unit)
940 <           throws InterruptedException {
941 <           if (e == null) throw new NullPointerException();
942 <           if (Thread.interrupted()) throw new InterruptedException();
943 <           xfer(e, NOWAIT, 0);
939 >       /**
940 >        * Inserts the specified element at the tail of this queue.
941 >        * As the queue is unbounded, this method will never block or
942 >        * return {@code false}.
943 >        *
944 >        * @return {@code true} (as specified by
945 >        *  {@link BlockingQueue#offer(Object,long,TimeUnit) BlockingQueue.offer})
946 >        * @throws NullPointerException if the specified element is null
947 >        */
948 >       public boolean offer(E e, long timeout, TimeUnit unit) {
949 >           xfer(e, true, ASYNC, 0);
950 |           return true;
951 |       }
952 |
953 +       /**
954 +        * Inserts the specified element at the tail of this queue.
955 +        * As the queue is unbounded, this method will never return {@code false}.
956 +        *
957 +        * @return {@code true} (as specified by
958 +        *  {@link BlockingQueue#offer(Object) BlockingQueue.offer})
959 +        * @throws NullPointerException if the specified element is null
960 +        */
961 |       public boolean offer(E e) {
962 <           if (e == null) throw new NullPointerException();
403 <           xfer(e, NOWAIT, 0);
962 >           xfer(e, true, ASYNC, 0);
963 |           return true;
964 |       }
965 |
966 |
+ |
/** |
967 |
+ |
* Inserts the specified element at the tail of this queue. |
968 |
+ |
* As the queue is unbounded, this method will never throw |
969 |
+ |
* {@link IllegalStateException} or return {@code false}. |
970 |
+ |
* |
971 |
+ |
* @return {@code true} (as specified by {@link Collection#add}) |
972 |
+ |
* @throws NullPointerException if the specified element is null |
973 |
+ |
*/ |
974 |
+ |
public boolean add(E e) { |
975 |
+ |
xfer(e, true, ASYNC, 0); |
976 |
+ |
return true; |
977 |
+ |
} |
978 |
+ |
|
979 |
+ |
/** |
980 |
+ |
* Transfers the element to a waiting consumer immediately, if possible. |
981 |
+ |
* |
982 |
+ |
* <p>More precisely, transfers the specified element immediately |
983 |
+ |
* if there exists a consumer already waiting to receive it (in |
984 |
+ |
* {@link #take} or timed {@link #poll(long,TimeUnit) poll}), |
985 |
+ |
* otherwise returning {@code false} without enqueuing the element. |
986 |
+ |
* |
987 |
+ |
* @throws NullPointerException if the specified element is null |
988 |
+ |
*/ |
989 |
+ |
public boolean tryTransfer(E e) { |
990 |
+ |
return xfer(e, true, NOW, 0) == null; |
991 |
+ |
} |
992 |
+ |
|
993 |
+ |
/** |
994 |
+ |
* Transfers the element to a consumer, waiting if necessary to do so. |
995 |
+ |
* |
996 |
+ |
* <p>More precisely, transfers the specified element immediately |
997 |
+ |
* if there exists a consumer already waiting to receive it (in |
998 |
+ |
* {@link #take} or timed {@link #poll(long,TimeUnit) poll}), |
999 |
+ |
* else inserts the specified element at the tail of this queue |
1000 |
+ |
* and waits until the element is received by a consumer. |
1001 |
+ |
* |
1002 |
+ |
* @throws NullPointerException if the specified element is null |
1003 |
+ |
*/ |
1004 |
|
public void transfer(E e) throws InterruptedException { |
1005 |
< |
if (e == null) throw new NullPointerException(); |
1006 |
< |
if (xfer(e, WAIT, 0) == null) { |
410 |
< |
Thread.interrupted(); |
1005 |
> |
if (xfer(e, true, SYNC, 0) != null) { |
1006 |
> |
Thread.interrupted(); // failure possible only due to interrupt |
1007 |
|
throw new InterruptedException(); |
1008 |
< |
} |
1008 |
> |
} |
1009 |
|
} |
1010 |
|
|
1011 |
+ |
/** |
1012 |
+ |
* Transfers the element to a consumer if it is possible to do so |
1013 |
+ |
* before the timeout elapses. |
1014 |
+ |
* |
1015 |
+ |
* <p>More precisely, transfers the specified element immediately |
1016 |
+ |
* if there exists a consumer already waiting to receive it (in |
1017 |
+ |
* {@link #take} or timed {@link #poll(long,TimeUnit) poll}), |
1018 |
+ |
* else inserts the specified element at the tail of this queue |
1019 |
+ |
* and waits until the element is received by a consumer, |
1020 |
+ |
* returning {@code false} if the specified wait time elapses |
1021 |
+ |
* before the element can be transferred. |
1022 |
+ |
* |
1023 |
+ |
* @throws NullPointerException if the specified element is null |
1024 |
+ |
*/ |
1025 |
|
public boolean tryTransfer(E e, long timeout, TimeUnit unit) |
1026 |
|
throws InterruptedException { |
1027 |
< |
if (e == null) throw new NullPointerException(); |
418 |
< |
if (xfer(e, TIMEOUT, unit.toNanos(timeout)) != null) |
1027 |
> |
if (xfer(e, true, TIMEOUT, unit.toNanos(timeout)) == null) |
1028 |
|
return true; |
1029 |
|
if (!Thread.interrupted()) |
1030 |
|
return false; |
1031 |
|
throw new InterruptedException(); |
1032 |
|
} |
1033 |
|
|
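As implemented here, a timed tryTransfer that times out does not leave the element enqueued. Sketch:

    import java.util.concurrent.TimeUnit;
    import jsr166y.LinkedTransferQueue;

    public class TimedTryTransferDemo {
        public static void main(String[] args) throws InterruptedException {
            LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
            // No consumer ever arrives: waits about 50 ms, then gives up.
            boolean ok = q.tryTransfer("x", 50, TimeUnit.MILLISECONDS);
            System.out.println(ok);          // false
            System.out.println(q.isEmpty()); // true: "x" was withdrawn on failure
        }
    }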
425 |
– |
public boolean tryTransfer(E e) { |
426 |
– |
if (e == null) throw new NullPointerException(); |
427 |
– |
return fulfill(e) != null; |
428 |
– |
} |
429 |
– |
|
1034 |
|
public E take() throws InterruptedException { |
1035 |
< |
Object e = xfer(null, WAIT, 0); |
1035 |
> |
E e = xfer(null, false, SYNC, 0); |
1036 |
|
if (e != null) |
1037 |
< |
return (E)e; |
1038 |
< |
Thread.interrupted(); |
1037 |
> |
return e; |
1038 |
> |
Thread.interrupted(); |
1039 |
|
throw new InterruptedException(); |
1040 |
|
} |
1041 |
|
|
1042 |
|
public E poll(long timeout, TimeUnit unit) throws InterruptedException { |
1043 |
< |
Object e = xfer(null, TIMEOUT, unit.toNanos(timeout)); |
1043 |
> |
E e = xfer(null, false, TIMEOUT, unit.toNanos(timeout)); |
1044 |
|
if (e != null || !Thread.interrupted()) |
1045 |
< |
return (E)e; |
1045 |
> |
return e; |
1046 |
|
throw new InterruptedException(); |
1047 |
|
} |
1048 |
|
|
1049 |
|
public E poll() { |
1050 |
< |
return (E)fulfill(null); |
1050 |
> |
return xfer(null, false, NOW, 0); |
1051 |
|
} |
1052 |
|
|
1053 |
+ |
/** |
1054 |
+ |
* @throws NullPointerException {@inheritDoc} |
1055 |
+ |
* @throws IllegalArgumentException {@inheritDoc} |
1056 |
+ |
*/ |
1057 |
|
public int drainTo(Collection<? super E> c) { |
1058 |
|
if (c == null) |
1059 |
|
throw new NullPointerException(); |
1068 |
|
return n; |
1069 |
|
} |
1070 |
|
|
1071 |
+ |
/** |
1072 |
+ |
* @throws NullPointerException {@inheritDoc} |
1073 |
+ |
* @throws IllegalArgumentException {@inheritDoc} |
1074 |
+ |
*/ |
1075 |
|
public int drainTo(Collection<? super E> c, int maxElements) { |
1076 |
|
if (c == null) |
1077 |
|
throw new NullPointerException(); |
1086 |
|
return n; |
1087 |
|
} |
1088 |
|
|
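Sketch of the bounded drainTo variant (the unchanged middles of both methods are elided from this diff):

    import java.util.ArrayList;
    import java.util.List;
    import jsr166y.LinkedTransferQueue;

    public class DrainDemo {
        public static void main(String[] args) {
            LinkedTransferQueue<Integer> q = new LinkedTransferQueue<Integer>();
            for (int i = 0; i < 5; i++)
                q.offer(i);
            List<Integer> sink = new ArrayList<Integer>();
            int n = q.drainTo(sink, 3);   // moves at most 3 elements, FIFO order
            System.out.println(n);        // 3
            System.out.println(sink);     // [0, 1, 2]
            System.out.println(q.size()); // 2
        }
    }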
477 |
– |
// Traversal-based methods |
478 |
– |
|
1089 |
|
/** |
1090 |
< |
* Return head after performing any outstanding helping steps |
1090 |
> |
* Returns an iterator over the elements in this queue in proper |
1091 |
> |
* sequence, from head to tail. |
1092 |
> |
* |
1093 |
> |
* <p>The returned iterator is a "weakly consistent" iterator that |
1094 |
> |
* will never throw |
1095 |
> |
* {@link ConcurrentModificationException ConcurrentModificationException}, |
1096 |
> |
* and guarantees to traverse elements as they existed upon |
1097 |
> |
* construction of the iterator, and may (but is not guaranteed |
1098 |
> |
* to) reflect any modifications subsequent to construction. |
1099 |
> |
* |
1100 |
> |
* @return an iterator over the elements in this queue in proper sequence |
1101 |
|
*/ |
482 |
– |
private QNode traversalHead() { |
483 |
– |
for (;;) { |
484 |
– |
QNode t = tail.get(); |
485 |
– |
QNode h = head.get(); |
486 |
– |
if (h != null && t != null) { |
487 |
– |
QNode last = t.next; |
488 |
– |
QNode first = h.next; |
489 |
– |
if (t == tail.get()) { |
490 |
– |
if (last != null) |
491 |
– |
tail.compareAndSet(t, last); |
492 |
– |
else if (first != null) { |
493 |
– |
Object x = first.get(); |
494 |
– |
if (x == first) |
495 |
– |
advanceHead(h, first); |
496 |
– |
else |
497 |
– |
return h; |
498 |
– |
} |
499 |
– |
else |
500 |
– |
return h; |
501 |
– |
} |
502 |
– |
} |
503 |
– |
} |
504 |
– |
} |
505 |
– |
|
506 |
– |
|
1102 |
|
public Iterator<E> iterator() { |
1103 |
|
return new Itr(); |
1104 |
|
} |
1105 |
|
|
511 |
– |
/** |
512 |
– |
* Iterators. Basic strategy is to traverse the list, treating |
513 |
– |
* non-data (i.e., request) nodes as terminating the list. |
514 |
– |
* Once a valid data node is found, the item is cached |
515 |
– |
* so that the next call to next() will return it even |
516 |
– |
* if subsequently removed. |
517 |
– |
*/ |
518 |
– |
class Itr implements Iterator<E> { |
519 |
– |
QNode nextNode; // Next node to return from next() |
520 |
– |
QNode currentNode; // last returned node, for remove() |
521 |
– |
QNode prevNode; // predecessor of last returned node |
522 |
– |
E nextItem; // Cache of next item, once committed to in next() |
523 |
– |
|
524 |
– |
Itr() { |
525 |
– |
nextNode = traversalHead(); |
526 |
– |
advance(); |
527 |
– |
} |
528 |
– |
|
529 |
– |
E advance() { |
530 |
– |
prevNode = currentNode; |
531 |
– |
currentNode = nextNode; |
532 |
– |
E x = nextItem; |
533 |
– |
|
534 |
– |
QNode p = nextNode.next; |
535 |
– |
for (;;) { |
536 |
– |
if (p == null || !p.isData) { |
537 |
– |
nextNode = null; |
538 |
– |
nextItem = null; |
539 |
– |
return x; |
540 |
– |
} |
541 |
– |
Object item = p.get(); |
542 |
– |
if (item != p && item != null) { |
543 |
– |
nextNode = p; |
544 |
– |
nextItem = (E)item; |
545 |
– |
return x; |
546 |
– |
} |
547 |
– |
prevNode = p; |
548 |
– |
p = p.next; |
549 |
– |
} |
550 |
– |
} |
551 |
– |
|
552 |
– |
public boolean hasNext() { |
553 |
– |
return nextNode != null; |
554 |
– |
} |
555 |
– |
|
556 |
– |
public E next() { |
557 |
– |
if (nextNode == null) throw new NoSuchElementException(); |
558 |
– |
return advance(); |
559 |
– |
} |
560 |
– |
|
561 |
– |
public void remove() { |
562 |
– |
QNode p = currentNode; |
563 |
– |
QNode prev = prevNode; |
564 |
– |
if (prev == null || p == null) |
565 |
– |
throw new IllegalStateException(); |
566 |
– |
Object x = p.get(); |
567 |
– |
if (x != null && x != p && p.compareAndSet(x, p)) |
568 |
– |
clean(prev, p); |
569 |
– |
} |
570 |
– |
} |
571 |
– |
|
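The weak-consistency guarantee described above means iteration never fails, but it may or may not observe concurrent updates. Sketch:

    import java.util.Iterator;
    import jsr166y.LinkedTransferQueue;

    public class IterDemo {
        public static void main(String[] args) {
            LinkedTransferQueue<Integer> q = new LinkedTransferQueue<Integer>();
            q.offer(1);
            q.offer(2);
            Iterator<Integer> it = q.iterator();
            q.offer(3); // concurrent change: no ConcurrentModificationException
            q.poll();   // removes 1; the iterator may nevertheless still report it
            while (it.hasNext())
                System.out.println(it.next()); // 1 and 2 guaranteed; 3 maybe
        }
    }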
1106 |
|
public E peek() { |
1107 |
< |
for (;;) { |
574 |
< |
QNode h = traversalHead(); |
575 |
< |
QNode p = h.next; |
576 |
< |
if (p == null) |
577 |
< |
return null; |
578 |
< |
Object x = p.get(); |
579 |
< |
if (p != x) { |
580 |
< |
if (!p.isData) |
581 |
< |
return null; |
582 |
< |
if (x != null) |
583 |
< |
return (E)x; |
584 |
< |
} |
585 |
< |
} |
1107 |
> |
return firstDataItem(); |
1108 |
|
} |
1109 |
|
|
1110 |
+ |
/** |
1111 |
+ |
* Returns {@code true} if this queue contains no elements. |
1112 |
+ |
* |
1113 |
+ |
* @return {@code true} if this queue contains no elements |
1114 |
+ |
*/ |
1115 |
|
public boolean isEmpty() { |
1116 |
< |
for (;;) { |
590 |
< |
QNode h = traversalHead(); |
591 |
< |
QNode p = h.next; |
592 |
< |
if (p == null) |
593 |
< |
return true; |
594 |
< |
Object x = p.get(); |
595 |
< |
if (p != x) { |
596 |
< |
if (!p.isData) |
597 |
< |
return true; |
598 |
< |
if (x != null) |
599 |
< |
return false; |
600 |
< |
} |
601 |
< |
} |
1116 |
> |
return firstOfMode(true) == null; |
1117 |
|
} |
1118 |
|
|
1119 |
|
public boolean hasWaitingConsumer() { |
1120 |
< |
for (;;) { |
606 |
< |
QNode h = traversalHead(); |
607 |
< |
QNode p = h.next; |
608 |
< |
if (p == null) |
609 |
< |
return false; |
610 |
< |
Object x = p.get(); |
611 |
< |
if (p != x) |
612 |
< |
return !p.isData; |
613 |
< |
} |
1120 |
> |
return firstOfMode(false) != null; |
1121 |
|
} |
1122 |
< |
|
1122 |
> |
|
1123 |
|
/** |
1124 |
|
* Returns the number of elements in this queue. If this queue |
1125 |
< |
* contains more than <tt>Integer.MAX_VALUE</tt> elements, returns |
1126 |
< |
* <tt>Integer.MAX_VALUE</tt>. |
1125 |
> |
* contains more than {@code Integer.MAX_VALUE} elements, returns |
1126 |
> |
* {@code Integer.MAX_VALUE}. |
1127 |
|
* |
1128 |
|
* <p>Beware that, unlike in most collections, this method is |
1129 |
|
* <em>NOT</em> a constant-time operation. Because of the |
1133 |
|
* @return the number of elements in this queue |
1134 |
|
*/ |
1135 |
|
public int size() { |
1136 |
< |
int count = 0; |
630 |
< |
QNode h = traversalHead(); |
631 |
< |
for (QNode p = h.next; p != null && p.isData; p = p.next) { |
632 |
< |
Object x = p.get(); |
633 |
< |
if (x != null && x != p) { |
634 |
< |
if (++count == Integer.MAX_VALUE) // saturated |
635 |
< |
break; |
636 |
< |
} |
637 |
< |
} |
638 |
< |
return count; |
1136 |
> |
return countOfMode(true); |
1137 |
|
} |
1138 |
|
|
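Given the O(n) warning above, emptiness checks should use isEmpty (which, via firstOfMode, stops at the first unmatched node) rather than size() == 0. Sketch:

    import jsr166y.LinkedTransferQueue;

    public class SizeDemo {
        public static void main(String[] args) {
            LinkedTransferQueue<Integer> q = new LinkedTransferQueue<Integer>();
            q.offer(1);
            // isEmpty() looks only for the first live data node; size() walks
            // the whole list and may already be stale when it returns.
            if (!q.isEmpty())
                System.out.println(q.size()); // 1
        }
    }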
1139 |
|
public int getWaitingConsumerCount() { |
1140 |
< |
int count = 0; |
1141 |
< |
QNode h = traversalHead(); |
1142 |
< |
for (QNode p = h.next; p != null && !p.isData; p = p.next) { |
1143 |
< |
if (p.get() == null) { |
1144 |
< |
if (++count == Integer.MAX_VALUE) |
1145 |
< |
break; |
1146 |
< |
} |
1147 |
< |
} |
1148 |
< |
return count; |
1140 |
> |
return countOfMode(false); |
1141 |
> |
} |
1142 |
> |
|
1143 |
> |
/** |
1144 |
> |
* Removes a single instance of the specified element from this queue, |
1145 |
> |
* if it is present. More formally, removes an element {@code e} such |
1146 |
> |
* that {@code o.equals(e)}, if this queue contains one or more such |
1147 |
> |
* elements. |
1148 |
> |
* Returns {@code true} if this queue contained the specified element |
1149 |
> |
* (or equivalently, if this queue changed as a result of the call). |
1150 |
> |
* |
1151 |
> |
* @param o element to be removed from this queue, if present |
1152 |
> |
* @return {@code true} if this queue changed as a result of the call |
1153 |
> |
*/ |
1154 |
> |
public boolean remove(Object o) { |
1155 |
> |
return findAndRemove(o); |
1156 |
|
} |
1157 |
|
|
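remove(Object) unlinks at most one matching element, compared by equals. Sketch:

    import jsr166y.LinkedTransferQueue;

    public class RemoveDemo {
        public static void main(String[] args) {
            LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
            q.offer("a"); q.offer("b"); q.offer("a");
            System.out.println(q.remove("a")); // true: first "a" unlinked
            System.out.println(q.size());      // 2
            System.out.println(q.remove("c")); // false: no such element
        }
    }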
1158 |
+ |
/** |
1159 |
+ |
* Always returns {@code Integer.MAX_VALUE} because a |
1160 |
+ |
* {@code LinkedTransferQueue} is not capacity constrained. |
1161 |
+ |
* |
1162 |
+ |
* @return {@code Integer.MAX_VALUE} (as specified by |
1163 |
+ |
* {@link BlockingQueue#remainingCapacity()}) |
1164 |
+ |
*/ |
1165 |
|
public int remainingCapacity() { |
1166 |
|
return Integer.MAX_VALUE; |
1167 |
|
} |
1168 |
|
|
1169 |
|
/** |
1170 |
< |
* Save the state to a stream (that is, serialize it). |
1170 |
> |
* Saves the state to a stream (that is, serializes it). |
1171 |
|
* |
1172 |
< |
* @serialData All of the elements (each an <tt>E</tt>) in |
1172 |
> |
* @serialData All of the elements (each an {@code E}) in |
1173 |
|
* the proper order, followed by a null |
1174 |
|
* @param s the stream |
1175 |
|
*/ |
1176 |
|
private void writeObject(java.io.ObjectOutputStream s) |
1177 |
|
throws java.io.IOException { |
1178 |
|
s.defaultWriteObject(); |
1179 |
< |
for (Iterator<E> it = iterator(); it.hasNext(); ) |
1180 |
< |
s.writeObject(it.next()); |
1179 |
> |
for (E e : this) |
1180 |
> |
s.writeObject(e); |
1181 |
|
// Use trailing null as sentinel |
1182 |
|
s.writeObject(null); |
1183 |
|
} |
1184 |
|
|
1185 |
|
/** |
1186 |
< |
* Reconstitute the Queue instance from a stream (that is, |
1187 |
< |
* deserialize it). |
1186 |
> |
* Reconstitutes the Queue instance from a stream (that is, |
1187 |
> |
* deserializes it). |
1188 |
> |
* |
1189 |
|
* @param s the stream |
1190 |
|
*/ |
1191 |
|
private void readObject(java.io.ObjectInputStream s) |
1192 |
|
throws java.io.IOException, ClassNotFoundException { |
1193 |
|
s.defaultReadObject(); |
1194 |
|
for (;;) { |
1195 |
< |
E item = (E)s.readObject(); |
1195 |
> |
@SuppressWarnings("unchecked") E item = (E) s.readObject(); |
1196 |
|
if (item == null) |
1197 |
|
break; |
1198 |
|
else |
1199 |
|
offer(item); |
1200 |
|
} |
1201 |
|
} |
1202 |
+ |
|
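The trailing null written above marks the end of the element stream for readObject. Round-trip sketch:

    import java.io.*;
    import jsr166y.LinkedTransferQueue;

    public class SerialDemo {
        public static void main(String[] args) throws Exception {
            LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
            q.offer("a");
            q.offer("b");
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bos);
            out.writeObject(q); // writes elements in order, then the null sentinel
            out.close();
            ObjectInputStream in = new ObjectInputStream(
                new ByteArrayInputStream(bos.toByteArray()));
            @SuppressWarnings("unchecked")
            LinkedTransferQueue<String> copy =
                (LinkedTransferQueue<String>) in.readObject();
            System.out.println(copy.poll() + " " + copy.poll()); // a b
        }
    }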
1203 |
+ |
// Unsafe mechanics |
1204 |
+ |
|
1205 |
+ |
private static final sun.misc.Unsafe UNSAFE = getUnsafe(); |
1206 |
+ |
private static final long headOffset = |
1207 |
+ |
objectFieldOffset(UNSAFE, "head", LinkedTransferQueue.class); |
1208 |
+ |
private static final long tailOffset = |
1209 |
+ |
objectFieldOffset(UNSAFE, "tail", LinkedTransferQueue.class); |
1210 |
+ |
private static final long cleanMeOffset = |
1211 |
+ |
objectFieldOffset(UNSAFE, "cleanMe", LinkedTransferQueue.class); |
1212 |
+ |
|
1213 |
+ |
static long objectFieldOffset(sun.misc.Unsafe UNSAFE, |
1214 |
+ |
String field, Class<?> klazz) { |
1215 |
+ |
try { |
1216 |
+ |
return UNSAFE.objectFieldOffset(klazz.getDeclaredField(field)); |
1217 |
+ |
} catch (NoSuchFieldException e) { |
1218 |
+ |
// Convert Exception to corresponding Error |
1219 |
+ |
NoSuchFieldError error = new NoSuchFieldError(field); |
1220 |
+ |
error.initCause(e); |
1221 |
+ |
throw error; |
1222 |
+ |
} |
1223 |
+ |
} |
1224 |
+ |
|
1225 |
+ |
/** |
1226 |
+ |
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. |
1227 |
+ |
* Replace with a simple call to Unsafe.getUnsafe when integrating |
1228 |
+ |
* into a jdk. |
1229 |
+ |
* |
1230 |
+ |
* @return a sun.misc.Unsafe |
1231 |
+ |
*/ |
1232 |
+ |
static sun.misc.Unsafe getUnsafe() { |
1233 |
+ |
try { |
1234 |
+ |
return sun.misc.Unsafe.getUnsafe(); |
1235 |
+ |
} catch (SecurityException se) { |
1236 |
+ |
try { |
1237 |
+ |
return java.security.AccessController.doPrivileged |
1238 |
+ |
(new java.security |
1239 |
+ |
.PrivilegedExceptionAction<sun.misc.Unsafe>() { |
1240 |
+ |
public sun.misc.Unsafe run() throws Exception { |
1241 |
+ |
java.lang.reflect.Field f = sun.misc |
1242 |
+ |
.Unsafe.class.getDeclaredField("theUnsafe"); |
1243 |
+ |
f.setAccessible(true); |
1244 |
+ |
return (sun.misc.Unsafe) f.get(null); |
1245 |
+ |
}}); |
1246 |
+ |
} catch (java.security.PrivilegedActionException e) { |
1247 |
+ |
throw new RuntimeException("Could not initialize intrinsics", |
1248 |
+ |
e.getCause()); |
1249 |
+ |
} |
1250 |
+ |
} |
1251 |
+ |
} |
1252 |
+ |
|
1253 |
|
} |