10 |
|
|
11 |
|
import java.util.AbstractQueue; |
12 |
|
import java.util.Collection; |
13 |
+ |
import java.util.ConcurrentModificationException; |
14 |
|
import java.util.Iterator; |
15 |
|
import java.util.NoSuchElementException; |
16 |
+ |
import java.util.Queue; |
17 |
|
import java.util.concurrent.locks.LockSupport; |
16 |
– |
import java.util.concurrent.atomic.AtomicReference; |
17 |
– |
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; |
18 |
– |
|
18 |
|
/** |
19 |
< |
* An unbounded {@linkplain TransferQueue} based on linked nodes. |
19 |
> |
* An unbounded {@link TransferQueue} based on linked nodes. |
20 |
|
* This queue orders elements FIFO (first-in-first-out) with respect |
21 |
|
* to any given producer. The <em>head</em> of the queue is that |
22 |
|
* element that has been on the queue the longest time for some |
52 |
|
private static final long serialVersionUID = -3223113410248163686L; |
53 |
|
|
54 |
|
/* |
55 |
< |
* This class extends the approach used in FIFO-mode |
56 |
< |
* SynchronousQueues. See the internal documentation, as well as |
57 |
< |
* the PPoPP 2006 paper "Scalable Synchronous Queues" by Scherer, |
58 |
< |
* Lea & Scott |
59 |
< |
* (http://www.cs.rice.edu/~wns1/papers/2006-PPoPP-SQ.pdf) |
55 |
> |
* *** Overview of Dual Queues with Slack *** |
56 |
> |
* |
57 |
> |
* Dual Queues, introduced by Scherer and Scott |
58 |
> |
* (http://www.cs.rice.edu/~wns1/papers/2004-DISC-DDS.pdf), are |
59 |
> |
* (linked) queues in which nodes may represent either data or |
60 |
> |
* requests. When a thread tries to enqueue a data node, but |
61 |
> |
* encounters a request node, it instead "matches" and removes it; |
62 |
> |
* and vice versa for enqueuing requests. Blocking Dual Queues |
63 |
> |
* arrange that threads enqueuing unmatched requests block until |
64 |
> |
* other threads provide the match. Dual Synchronous Queues (see |
65 |
> |
* Scherer, Lea, & Scott |
66 |
> |
* http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf) |
67 |
> |
* additionally arrange that threads enqueuing unmatched data also |
68 |
> |
* block. Dual Transfer Queues support all of these modes, as |
69 |
> |
* dictated by callers. |
70 |
> |
* |
71 |
> |
* A FIFO dual queue may be implemented using a variation of the |
72 |
> |
* Michael & Scott (M&S) lock-free queue algorithm |
73 |
> |
* (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf). |
74 |
> |
* It maintains two pointer fields, "head", pointing to a |
75 |
> |
* (matched) node that in turn points to the first actual |
76 |
> |
* (unmatched) queue node (or null if empty); and "tail" that |
77 |
> |
* points to the last node on the queue (or again null if |
78 |
> |
* empty). For example, here is a possible queue with four data |
79 |
> |
* elements: |
80 |
> |
* |
81 |
> |
* head tail |
82 |
> |
* | | |
83 |
> |
* v v |
84 |
> |
* M -> U -> U -> U -> U |
85 |
> |
* |
86 |
> |
* The M&S queue algorithm is known to be prone to scalability and |
87 |
> |
* overhead limitations when maintaining (via CAS) these head and |
88 |
> |
* tail pointers. This has led to the development of |
89 |
> |
* contention-reducing variants such as elimination arrays (see |
90 |
> |
* Moir et al http://portal.acm.org/citation.cfm?id=1074013) and |
91 |
> |
* optimistic back pointers (see Ladan-Mozes & Shavit |
92 |
> |
* http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf). |
93 |
> |
* However, the nature of dual queues enables a simpler tactic for |
94 |
> |
* improving M&S-style implementations when dual-ness is needed. |
95 |
> |
* |
96 |
> |
* In a dual queue, each node must atomically maintain its match |
97 |
> |
* status. While there are other possible variants, we implement |
98 |
> |
* this here as: for a data-mode node, matching entails CASing an |
99 |
> |
* "item" field from a non-null data value to null upon match, and |
100 |
> |
* vice-versa for request nodes, CASing from null to a data |
101 |
> |
* value. (Note that the linearization properties of this style of |
102 |
> |
* queue are easy to verify -- elements are made available by |
103 |
> |
* linking, and unavailable by matching.) Compared to plain M&S |
104 |
> |
* queues, this property of dual queues requires one additional |
105 |
> |
* successful atomic operation per enq/deq pair. But it also |
106 |
> |
* enables lower cost variants of queue maintenance mechanics. (A |
107 |
> |
* variation of this idea applies even for non-dual queues that |
108 |
> |
* support deletion of interior elements, such as |
109 |
> |
* j.u.c.ConcurrentLinkedQueue.) |
110 |
> |
* |
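* A minimal sketch of this match rule, assuming a hypothetical
* stand-alone node built on j.u.c.atomic.AtomicReference (the Node
* class below uses Unsafe-based CAS to the same effect):
*
*   final class DualNode {
*       final boolean isData;
*       final AtomicReference<Object> item;
*       DualNode(Object x, boolean isData) {
*           this.isData = isData;
*           this.item = new AtomicReference<Object>(x);
*       }
*       // Succeeds at most once per node: a data node matches by
*       // CASing item to null; a request node by CASing null to data.
*       boolean tryMatch(Object offer) {
*           Object x = item.get();
*           return isData
*               ? (x != null && item.compareAndSet(x, null))
*               : (x == null && item.compareAndSet(null, offer));
*       }
*   }
*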
111 |
> |
* Once a node is matched, its match status can never again |
112 |
> |
* change. We may thus arrange that the linked list of them |
113 |
> |
* contain a prefix of zero or more matched nodes, followed by a |
114 |
> |
* suffix of zero or more unmatched nodes. (Note that we allow |
115 |
> |
* both the prefix and suffix to be zero length, which in turn |
116 |
> |
* means that we do not use a dummy header.) If we were not |
117 |
> |
* concerned with either time or space efficiency, we could |
118 |
> |
* correctly perform enqueue and dequeue operations by traversing |
119 |
> |
* from a pointer to the initial node; CASing the item of the |
120 |
> |
* first unmatched node on match and CASing the next field of the |
121 |
> |
* trailing node on appends. (Plus some special-casing when |
122 |
> |
* initially empty). While this would be a terrible idea in |
123 |
> |
* itself, it does have the benefit of not requiring ANY atomic |
124 |
> |
* updates on head/tail fields. |
125 |
> |
* |
126 |
> |
* We introduce here an approach that lies between the extremes of |
127 |
> |
* never versus always updating queue (head and tail) pointers. |
128 |
> |
* This offers a tradeoff between sometimes requiring extra |
129 |
> |
* traversal steps to locate the first and/or last unmatched |
130 |
> |
* nodes, versus the reduced overhead and contention of fewer |
131 |
> |
* updates to queue pointers. For example, a possible snapshot of |
132 |
> |
* a queue is: |
133 |
> |
* |
134 |
> |
* head tail |
135 |
> |
* | | |
136 |
> |
* v v |
137 |
> |
* M -> M -> U -> U -> U -> U |
138 |
> |
* |
139 |
> |
* The best value for this "slack" (the targeted maximum distance |
140 |
> |
* between the value of "head" and the first unmatched node, and |
141 |
> |
* similarly for "tail") is an empirical matter. We have found |
142 |
> |
* that using very small constants in the range of 1-3 works best |
143 |
> |
* over a range of platforms. Larger values introduce increasing |
144 |
> |
* costs of cache misses and risks of long traversal chains, while |
145 |
> |
* smaller values increase CAS contention and overhead. |
146 |
> |
* |
147 |
> |
* Dual queues with slack differ from plain M&S dual queues by |
148 |
> |
* virtue of only sometimes updating head or tail pointers when |
149 |
> |
* matching, appending, or even traversing nodes, in order to |
150 |
> |
* maintain a targeted slack. The idea of "sometimes" may be |
151 |
> |
* operationalized in several ways. The simplest is to use a |
152 |
> |
* per-operation counter incremented on each traversal step, and |
153 |
> |
* to try (via CAS) to update the associated queue pointer |
154 |
> |
* whenever the count exceeds a threshold. Another, that requires |
155 |
> |
* more overhead, is to use random number generators to update |
156 |
> |
* with a given probability per traversal step. |
157 |
> |
* |
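* For illustration only, the counter-based variant could be
* sketched as below (SLACK is a hypothetical constant; the
* implementation here instead hard-wires a slack of two, counted
* by pointer comparison):
*
*   Node p = head;                   // skip the matched prefix
*   int steps = 0;
*   while (p != null && p.isMatched()) {
*       Node n = p.next;
*       if (++steps > SLACK) {
*           casHead(head, n);        // best effort; failure is fine
*           steps = 0;
*       }
*       p = n;
*   }
*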
158 |
> |
* In any strategy along these lines, because CASes updating |
159 |
> |
* fields may fail, the actual slack may exceed targeted |
160 |
> |
* slack. However, they may be retried at any time to maintain |
161 |
> |
* targets. Even when using very small slack values, this |
162 |
> |
* approach works well for dual queues because it allows all |
163 |
> |
* operations up to the point of matching or appending an item |
164 |
> |
* (hence potentially allowing progress by another thread) to be |
165 |
> |
* read-only, thus not introducing any further contention. As |
166 |
> |
* described below, we implement this by performing slack |
167 |
> |
* maintenance retries only after these points. |
168 |
> |
* |
169 |
> |
* As an accompaniment to such techniques, traversal overhead can |
170 |
> |
* be further reduced without increasing contention of head |
171 |
> |
* pointer updates: Threads may sometimes shortcut the "next" link |
172 |
> |
* path from the current "head" node to be closer to the currently |
173 |
> |
* known first unmatched node, and similarly for tail. Again, this |
174 |
> |
* may be triggered using thresholds or randomization. |
175 |
> |
* |
176 |
> |
* These ideas must be further extended to avoid unbounded amounts |
177 |
> |
* of costly-to-reclaim garbage caused by the sequential "next" |
178 |
> |
* links of nodes starting at old forgotten head nodes: As first |
179 |
> |
* described in detail by Boehm |
180 |
> |
* (http://portal.acm.org/citation.cfm?doid=503272.503282) if a GC |
181 |
> |
* delays noticing that any arbitrarily old node has become |
182 |
> |
* garbage, all newer dead nodes will also be unreclaimed. |
183 |
> |
* (Similar issues arise in non-GC environments.) To cope with |
184 |
> |
* this in our implementation, upon CASing to advance the head |
185 |
> |
* pointer, we set the "next" link of the previous head to point |
186 |
> |
* only to itself; thus limiting the length of connected dead lists. |
187 |
> |
* (We also take similar care to wipe out possibly garbage |
188 |
> |
* retaining values held in other Node fields.) However, doing so |
189 |
> |
* adds some further complexity to traversal: If any "next" |
190 |
> |
* pointer links to itself, it indicates that the current thread |
191 |
> |
* has lagged behind a head-update, and so the traversal must |
192 |
> |
* continue from the "head". Traversals trying to find the |
193 |
> |
* current tail starting from "tail" may also encounter |
194 |
> |
* self-links, in which case they also continue at "head". |
195 |
> |
* |
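* This makes stale traversals cheap to detect: the successor step
* reduces to the following (defined below as method succ):
*
*   Node succ(Node p) {
*       Node next = p.next;
*       return (p == next) ? head : next; // self-linked: restart
*   }
*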
196 |
> |
* It is tempting in a slack-based scheme to not even use CAS for |
197 |
> |
* updates (similarly to Ladan-Mozes & Shavit). However, this |
198 |
> |
* cannot be done for head updates under the above link-forgetting |
199 |
> |
* mechanics because an update may leave head at a detached node. |
200 |
> |
* And while direct writes are possible for tail updates, they |
201 |
> |
* increase the risk of long retraversals, and hence long garbage |
202 |
> |
* chains, which can be much more costly than is worthwhile |
203 |
> |
* considering that the cost difference of performing a CAS vs |
204 |
> |
* write is smaller when they are not triggered on each operation |
205 |
> |
* (especially considering that writes and CASes equally require |
206 |
> |
* additional GC bookkeeping ("write barriers") that are sometimes |
207 |
> |
* more costly than the writes themselves because of contention). |
208 |
> |
* |
209 |
> |
* *** Overview of implementation *** |
210 |
> |
* |
211 |
> |
* We use a threshold-based approach to updates, with a slack |
212 |
> |
* threshold of two -- that is, we update head/tail when the |
213 |
> |
* current pointer appears to be two or more steps away from the |
214 |
> |
* first/last node. The slack value is hard-wired: a path greater |
215 |
> |
* than one is naturally implemented by checking equality of |
216 |
> |
* traversal pointers except when the list has only one element, |
217 |
> |
* in which case we keep the slack threshold at one. Avoiding tracking |
218 |
> |
* explicit counts across method calls slightly simplifies an |
219 |
> |
* already-messy implementation. Using randomization would |
220 |
> |
* probably work better if there were a low-quality dirt-cheap |
221 |
> |
* per-thread one available, but even ThreadLocalRandom is too |
222 |
> |
* heavy for these purposes. |
223 |
> |
* |
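* Pointer equality thus acts as the implicit two-step check; for
* example, after tryAppend (below) links node s past predecessor p:
*
*   if (p != t)          // a node lies between the last published
*       casTail(t, s);   // tail t and s, so slack >= 2; best effort
*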
224 |
> |
* With such a small slack threshold value, it is not worthwhile |
225 |
> |
* to augment this with path short-circuiting (i.e., unsplicing |
226 |
> |
* interior nodes) except in the case of cancellation/removal (see |
227 |
> |
* below). |
228 |
> |
* |
229 |
> |
* We allow both the head and tail fields to be null before any |
230 |
> |
* nodes are enqueued; initializing upon first append. This |
231 |
> |
* simplifies some other logic, as well as providing more |
232 |
> |
* efficient explicit control paths instead of letting JVMs insert |
233 |
> |
* implicit NullPointerExceptions when they are null. While not |
234 |
> |
* currently fully implemented, we also leave open the possibility |
235 |
> |
* of re-nulling these fields when empty (which is complicated to |
236 |
> |
* arrange, for little benefit.) |
237 |
> |
* |
238 |
> |
* All enqueue/dequeue operations are handled by the single method |
239 |
> |
* "xfer" with parameters indicating whether to act as some form |
240 |
> |
* of offer, put, poll, take, or transfer (each possibly with |
241 |
> |
* timeout). The relative complexity of using one monolithic |
242 |
> |
* method outweighs the code bulk and maintenance problems of |
243 |
> |
* using separate methods for each case. |
244 |
> |
* |
245 |
> |
* Operation consists of up to three phases. The first is |
246 |
> |
* implemented within method xfer, the second in tryAppend, and |
247 |
> |
* the third in method awaitMatch. |
248 |
> |
* |
249 |
> |
* 1. Try to match an existing node |
250 |
> |
* |
251 |
> |
* Starting at head, skip already-matched nodes until finding |
252 |
> |
* an unmatched node of opposite mode, if one exists, in which |
253 |
> |
* case we match it and return, if necessary also updating |
254 |
> |
* head to one past the matched node (or to the node itself if |
255 |
> |
* the list has no other unmatched nodes). If the CAS misses, |
256 |
> |
* a loop retries advancing head by two steps until either |
257 |
> |
* success or the slack is at most two. By requiring that each |
258 |
> |
* attempt advances head by two (if applicable), we ensure that |
259 |
> |
* the slack does not grow without bound. Traversals also check |
260 |
> |
* if the initial head is now off-list, in which case they |
261 |
> |
* start at the new head. |
262 |
> |
* |
263 |
> |
* If no candidates are found and the call was untimed |
264 |
> |
* poll/offer, (argument "how" is NOW) return. |
265 |
> |
* |
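* Eliding the head-slack maintenance, phase 1 condenses to the
* following sketch of the xfer code below:
*
*   for (Node p = head; p != null; p = succ(p)) {
*       Object item = p.item;
*       if (item != p && (item != null) == p.isData) { // unmatched
*           if (p.isData == haveData)
*               break;                     // same mode: append instead
*           if (p.casItem(item, e)) {      // match
*               LockSupport.unpark(p.waiter);
*               return this.<E>cast(item);
*           }
*       }
*   }
*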
266 |
> |
* 2. Try to append a new node (method tryAppend) |
267 |
> |
* |
268 |
> |
* Starting at current tail pointer, find the actual last node |
269 |
> |
* and try to append a new node (or if head was null, establish |
270 |
> |
* the first node). Nodes can be appended only if their |
271 |
> |
* predecessors are either already matched or are of the same |
272 |
> |
* mode. If we detect otherwise, then a new node with opposite |
273 |
> |
* mode must have been appended during traversal, so we must |
274 |
> |
* restart at phase 1. The traversal and update steps are |
275 |
> |
* otherwise similar to phase 1: Retrying upon CAS misses and |
276 |
> |
* checking for staleness. In particular, if a self-link is |
277 |
> |
* encountered, then we can safely jump to a node on the list |
278 |
> |
* by continuing the traversal at current head. |
279 |
> |
* |
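* The mode check reduces to a single predicate on the would-be
* predecessor p (a hypothetical helper; the code below implements
* its negation as Node.cannotPrecede):
*
*   // appending a node of mode haveData after p is legal iff
*   // p is already matched or carries the same mode
*   boolean mayAppendAfter(Node p, boolean haveData) {
*       return p.isData == haveData || p.isMatched();
*   }
*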
280 |
> |
* On successful append, if the call was ASYNC, return. |
281 |
> |
* |
282 |
> |
* 3. Await match or cancellation (method awaitMatch) |
283 |
> |
* |
284 |
> |
* Wait for another thread to match the node, instead cancelling if |
285 |
> |
* the current thread was interrupted or the wait timed out. On |
286 |
> |
* multiprocessors, we use front-of-queue spinning: If a node |
287 |
> |
* appears to be the first unmatched node in the queue, it |
288 |
> |
* spins a bit before blocking. In either case, before blocking |
289 |
> |
* it tries to unsplice any nodes between the current "head" |
290 |
> |
* and the first unmatched node. |
291 |
> |
* |
292 |
> |
* Front-of-queue spinning vastly improves performance of |
293 |
> |
* heavily contended queues. And so long as it is relatively |
294 |
> |
* brief and "quiet", spinning does not much impact performance |
295 |
> |
* of less-contended queues. During spins threads check their |
296 |
> |
* interrupt status and generate a thread-local random number |
297 |
> |
* to decide to occasionally perform a Thread.yield. While |
298 |
> |
* yield has underdefined specs, we assume that it might help, |
299 |
> |
* and will not hurt in limiting impact of spinning on busy |
300 |
> |
* systems. We also use smaller (1/2) spins for nodes that are |
301 |
> |
* not known to be front but whose predecessors have not |
302 |
> |
* blocked -- these "chained" spins avoid artifacts of |
303 |
> |
* front-of-queue rules which otherwise lead to alternating |
304 |
> |
* nodes spinning vs blocking. Further, front threads that |
305 |
> |
* represent phase changes (from data to request node or vice |
306 |
> |
* versa) compared to their predecessors receive additional |
307 |
> |
* chained spins, reflecting longer paths typically required to |
308 |
> |
* unblock threads during phase changes. |
309 |
> |
* |
310 |
> |
* |
311 |
> |
* ** Unlinking removed interior nodes ** |
312 |
> |
* |
313 |
> |
* In addition to minimizing garbage retention via self-linking |
314 |
> |
* described above, we also unlink removed interior nodes. These |
315 |
> |
* may arise due to timed out or interrupted waits, or calls to |
316 |
> |
* remove(x) or Iterator.remove. Normally, given a node that was |
317 |
> |
* at one time known to be the predecessor of some node s that is |
318 |
> |
* to be removed, we can unsplice s by CASing the next field of |
319 |
> |
* its predecessor if it still points to s (otherwise s must |
320 |
> |
* already have been removed or is now offlist). But there are two |
321 |
> |
* situations in which we cannot guarantee to make node s |
322 |
> |
* unreachable in this way: (1) If s is the trailing node of list |
323 |
> |
* (i.e., with null next), then it is pinned as the target node |
324 |
> |
* for appends, so can only be removed later when other nodes are |
325 |
> |
* appended. (2) We cannot necessarily unlink s given a |
326 |
> |
* predecessor node that is matched (including the case of being |
327 |
> |
* cancelled): the predecessor may already be unspliced, in which |
328 |
> |
* case some previous reachable node may still point to s. |
329 |
> |
* (For further explanation see Herlihy & Shavit "The Art of |
330 |
> |
* Multiprocessor Programming" chapter 9). However, in both |
330 |
> |
* cases, we can rule out the need for further action if either s |
331 |
> |
* or its predecessor is (or can be made to be) at, or falls off |
332 |
> |
* from, the head of the list. |
334 |
> |
* |
335 |
> |
* Without taking these into account, it would be possible for an |
336 |
> |
* unbounded number of supposedly removed nodes to remain |
337 |
> |
* reachable. Situations leading to such buildup are uncommon but |
338 |
> |
* can occur in practice; for example when a series of short timed |
339 |
> |
* calls to poll repeatedly time out but never otherwise fall off |
340 |
> |
* the list because of an untimed call to take at the front of the |
341 |
> |
* queue. |
342 |
> |
* |
343 |
> |
* When these cases arise, rather than always retraversing the |
344 |
> |
* entire list to find an actual predecessor to unlink (which |
345 |
> |
* won't help for case (1) anyway), we record a conservative |
346 |
> |
* estimate of possible unsplice failures (in "sweepVotes"). We |
347 |
> |
* trigger a full sweep when the estimate exceeds a threshold |
348 |
> |
* indicating the maximum number of estimated removal failures to |
349 |
> |
* tolerate before sweeping through, unlinking cancelled nodes |
350 |
> |
* that were not unlinked upon initial removal. We perform sweeps |
351 |
> |
* by the thread hitting threshold (rather than background threads |
352 |
> |
* or by spreading work to other threads) because in the main |
353 |
> |
* contexts in which removal occurs, the caller is already |
354 |
> |
* timed-out, cancelled, or performing a potentially O(n) |
355 |
> |
* operation (i.e., remove(x)), none of which are time-critical |
356 |
> |
* enough to warrant the overhead that alternatives would impose |
357 |
> |
* on other threads. |
358 |
|
* |
359 |
< |
* The main extension is to provide different Wait modes for the |
360 |
< |
* main "xfer" method that puts or takes items. These don't |
361 |
< |
* impact the basic dual-queue logic, but instead control whether |
362 |
< |
* or how threads block upon insertion of request or data nodes |
363 |
< |
* into the dual queue. It also uses slightly different |
364 |
< |
* conventions for tracking whether nodes are off-list or |
365 |
< |
* cancelled. |
359 |
> |
* Because the sweepVotes estimate is conservative, and because |
360 |
> |
* nodes become unlinked "naturally" as they fall off the head of |
361 |
> |
* the queue, and because we allow votes to accumulate even while |
362 |
> |
* sweeps are in progress, there are typically significantly fewer |
363 |
> |
* such nodes than estimated. Choice of a threshold value |
364 |
> |
* balances the likelihood of wasted effort and contention, versus |
365 |
> |
* providing a worst-case bound on retention of interior nodes in |
366 |
> |
* quiescent queues. The value defined below was chosen |
367 |
> |
* empirically to balance these under various timeout scenarios. |
368 |
> |
* |
369 |
> |
* Note that we cannot self-link unlinked interior nodes during |
370 |
> |
* sweeps. However, the associated garbage chains terminate when |
371 |
> |
* some successor ultimately falls off the head of the list and is |
372 |
> |
* self-linked. |
373 |
|
*/ |
374 |
|
|
375 |
< |
// Wait modes for xfer method |
376 |
< |
static final int NOWAIT = 0; |
377 |
< |
static final int TIMEOUT = 1; |
74 |
< |
static final int WAIT = 2; |
75 |
< |
|
76 |
< |
/** The number of CPUs, for spin control */ |
77 |
< |
static final int NCPUS = Runtime.getRuntime().availableProcessors(); |
375 |
> |
/** True if on multiprocessor */ |
376 |
> |
private static final boolean MP = |
377 |
> |
Runtime.getRuntime().availableProcessors() > 1; |
378 |
|
|
379 |
|
/** |
380 |
< |
* The number of times to spin before blocking in timed waits. |
381 |
< |
* The value is empirically derived -- it works well across a |
382 |
< |
* variety of processors and OSes. Empirically, the best value |
383 |
< |
* seems not to vary with number of CPUs (beyond 2) so is just |
384 |
< |
* a constant. |
380 |
> |
* The number of times to spin (with randomly interspersed calls |
381 |
> |
* to Thread.yield) on multiprocessor before blocking when a node |
382 |
> |
* is apparently the first waiter in the queue. See above for |
383 |
> |
* explanation. Must be a power of two. The value is empirically |
384 |
> |
* derived -- it works pretty well across a variety of processors, |
385 |
> |
* numbers of CPUs, and OSes. |
386 |
|
*/ |
387 |
< |
static final int maxTimedSpins = (NCPUS < 2) ? 0 : 32; |
387 |
> |
private static final int FRONT_SPINS = 1 << 7; |
388 |
|
|
389 |
|
/** |
390 |
< |
* The number of times to spin before blocking in untimed waits. |
391 |
< |
* This is greater than timed value because untimed waits spin |
392 |
< |
* faster since they don't need to check times on each spin. |
390 |
> |
* The number of times to spin before blocking when a node is |
391 |
> |
* preceded by another node that is apparently spinning. Also |
392 |
> |
* serves as an increment to FRONT_SPINS on phase changes, and as |
393 |
> |
* base average frequency for yielding during spins. Must be a |
394 |
> |
* power of two. |
395 |
|
*/ |
396 |
< |
static final int maxUntimedSpins = maxTimedSpins * 16; |
396 |
> |
private static final int CHAINED_SPINS = FRONT_SPINS >>> 1; |
397 |
|
|
398 |
|
/** |
399 |
< |
* The number of nanoseconds for which it is faster to spin |
400 |
< |
* rather than to use timed park. A rough estimate suffices. |
399 |
> |
* The maximum number of estimated removal failures (sweepVotes) |
400 |
> |
* to tolerate before sweeping through the queue unlinking |
401 |
> |
* cancelled nodes that were not unlinked upon initial |
402 |
> |
* removal. See above for explanation. The value must be at least |
403 |
> |
* two to avoid useless sweeps when removing trailing nodes. |
404 |
|
*/ |
405 |
< |
static final long spinForTimeoutThreshold = 1000L; |
405 |
> |
static final int SWEEP_THRESHOLD = 32; |
406 |
|
|
407 |
|
/** |
408 |
< |
* Node class for LinkedTransferQueue. Opportunistically |
409 |
< |
* subclasses from AtomicReference to represent item. Uses Object, |
410 |
< |
* not E, to allow setting item to "this" after use, to avoid |
411 |
< |
* garbage retention. Similarly, setting the next field to this is |
106 |
< |
* used as sentinel that node is off list. |
408 |
> |
* Queue nodes. Uses Object, not E, for items to allow forgetting |
409 |
> |
* them after use. Relies heavily on Unsafe mechanics to minimize |
410 |
> |
* unnecessary ordering constraints: Writes that are intrinsically |
411 |
> |
* ordered wrt other accesses or CASes use simple relaxed forms. |
412 |
|
*/ |
413 |
< |
static final class Node<E> extends AtomicReference<Object> { |
414 |
< |
volatile Node<E> next; |
415 |
< |
volatile Thread waiter; // to control park/unpark |
416 |
< |
final boolean isData; |
413 |
> |
static final class Node { |
414 |
> |
final boolean isData; // false if this is a request node |
415 |
> |
volatile Object item; // initially non-null if isData; CASed to match |
416 |
> |
volatile Node next; |
417 |
> |
volatile Thread waiter; // null until waiting |
418 |
|
|
419 |
< |
Node(E item, boolean isData) { |
420 |
< |
super(item); |
419 |
> |
// CAS methods for fields |
420 |
> |
final boolean casNext(Node cmp, Node val) { |
421 |
> |
return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val); |
422 |
> |
} |
423 |
> |
|
424 |
> |
final boolean casItem(Object cmp, Object val) { |
425 |
> |
assert cmp == null || cmp.getClass() != Node.class; |
426 |
> |
return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val); |
427 |
> |
} |
428 |
> |
|
429 |
> |
/** |
430 |
> |
* Creates a new node. Uses relaxed write because item can only |
431 |
> |
* be seen if followed by CAS. |
432 |
> |
*/ |
433 |
> |
Node(Object item, boolean isData) { |
434 |
> |
UNSAFE.putObject(this, itemOffset, item); // relaxed write |
435 |
|
this.isData = isData; |
436 |
|
} |
437 |
|
|
438 |
< |
@SuppressWarnings("rawtypes") |
439 |
< |
static final AtomicReferenceFieldUpdater<Node, Node> |
440 |
< |
nextUpdater = AtomicReferenceFieldUpdater.newUpdater |
441 |
< |
(Node.class, Node.class, "next"); |
438 |
> |
/** |
439 |
> |
* Links node to itself to avoid garbage retention. Called |
440 |
> |
* only after CASing head field, so uses relaxed write. |
441 |
> |
*/ |
442 |
> |
final void forgetNext() { |
443 |
> |
UNSAFE.putObject(this, nextOffset, this); |
444 |
> |
} |
445 |
> |
|
446 |
> |
/** |
447 |
> |
* Sets item to self and waiter to null, to avoid garbage |
448 |
> |
* retention after matching or cancelling. Uses relaxed writes |
449 |
> |
* because order is already constrained in the only calling |
450 |
> |
* contexts: item is forgotten only after volatile/atomic |
451 |
> |
* mechanics that extract items. Similarly, clearing waiter |
452 |
> |
* follows either CAS or return from park (if ever parked; |
453 |
> |
* else we don't care). |
454 |
> |
*/ |
455 |
> |
final void forgetContents() { |
456 |
> |
UNSAFE.putObject(this, itemOffset, this); |
457 |
> |
UNSAFE.putObject(this, waiterOffset, null); |
458 |
> |
} |
459 |
> |
|
460 |
> |
/** |
461 |
> |
* Returns true if this node has been matched, including the |
462 |
> |
* case of artificial matches due to cancellation. |
463 |
> |
*/ |
464 |
> |
final boolean isMatched() { |
465 |
> |
Object x = item; |
466 |
> |
return (x == this) || ((x == null) == isData); |
467 |
> |
} |
468 |
> |
|
469 |
> |
/** |
470 |
> |
* Returns true if this is an unmatched request node. |
471 |
> |
*/ |
472 |
> |
final boolean isUnmatchedRequest() { |
473 |
> |
return !isData && item == null; |
474 |
> |
} |
475 |
|
|
476 |
< |
final boolean casNext(Node<E> cmp, Node<E> val) { |
477 |
< |
return nextUpdater.compareAndSet(this, cmp, val); |
476 |
> |
/** |
477 |
> |
* Returns true if a node with the given mode cannot be |
478 |
> |
* appended to this node because this node is unmatched and |
479 |
> |
* has opposite data mode. |
480 |
> |
*/ |
481 |
> |
final boolean cannotPrecede(boolean haveData) { |
482 |
> |
boolean d = isData; |
483 |
> |
Object x; |
484 |
> |
return d != haveData && (x = item) != this && (x != null) == d; |
485 |
|
} |
486 |
|
|
487 |
< |
final void clearNext() { |
488 |
< |
nextUpdater.lazySet(this, this); |
487 |
> |
/** |
488 |
> |
* Tries to artificially match a data node -- used by remove. |
489 |
> |
*/ |
490 |
> |
final boolean tryMatchData() { |
491 |
> |
assert isData; |
492 |
> |
Object x = item; |
493 |
> |
if (x != null && x != this && casItem(x, null)) { |
494 |
> |
LockSupport.unpark(waiter); |
495 |
> |
return true; |
496 |
> |
} |
497 |
> |
return false; |
498 |
|
} |
499 |
|
|
500 |
+ |
// Unsafe mechanics |
501 |
+ |
private static final sun.misc.Unsafe UNSAFE = getUnsafe(); |
502 |
+ |
private static final long nextOffset = |
503 |
+ |
objectFieldOffset(UNSAFE, "next", Node.class); |
504 |
+ |
private static final long itemOffset = |
505 |
+ |
objectFieldOffset(UNSAFE, "item", Node.class); |
506 |
+ |
private static final long waiterOffset = |
507 |
+ |
objectFieldOffset(UNSAFE, "waiter", Node.class); |
508 |
+ |
|
509 |
|
private static final long serialVersionUID = -3375979862319811754L; |
510 |
|
} |
511 |
|
|
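// For comparison, a portable sketch of the same CAS via the field
// updater that the replaced code relied on (the UnsafE forms above
// additionally allow relaxed writes and avoid updater overhead):
//
//     static final AtomicReferenceFieldUpdater<Node, Node> NEXT =
//         AtomicReferenceFieldUpdater.newUpdater(Node.class, Node.class, "next");
//
//     final boolean casNext(Node cmp, Node val) {
//         return NEXT.compareAndSet(this, cmp, val);
//     }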
512 |
< |
/** |
513 |
< |
* Padded version of AtomicReference used for head, tail and |
136 |
< |
* cleanMe, to alleviate contention across threads CASing one vs |
137 |
< |
* the other. |
138 |
< |
*/ |
139 |
< |
static final class PaddedAtomicReference<T> extends AtomicReference<T> { |
140 |
< |
// enough padding for 64bytes with 4byte refs |
141 |
< |
Object p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pa, pb, pc, pd, pe; |
142 |
< |
PaddedAtomicReference(T r) { super(r); } |
143 |
< |
private static final long serialVersionUID = 8170090609809740854L; |
144 |
< |
} |
512 |
> |
/** head of the queue; null until first enqueue */ |
513 |
> |
transient volatile Node head; |
514 |
|
|
515 |
+ |
/** tail of the queue; null until first append */ |
516 |
+ |
private transient volatile Node tail; |
517 |
|
|
518 |
< |
/** head of the queue */ |
519 |
< |
private transient final PaddedAtomicReference<Node<E>> head; |
518 |
> |
/** The number of apparent failures to unsplice removed nodes */ |
519 |
> |
private transient volatile int sweepVotes; |
520 |
|
|
521 |
< |
/** tail of the queue */ |
522 |
< |
private transient final PaddedAtomicReference<Node<E>> tail; |
521 |
> |
// CAS methods for fields |
522 |
> |
private boolean casTail(Node cmp, Node val) { |
523 |
> |
return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val); |
524 |
> |
} |
525 |
|
|
526 |
< |
/** |
527 |
< |
* Reference to a cancelled node that might not yet have been |
528 |
< |
* unlinked from queue because it was the last inserted node |
156 |
< |
* when it cancelled. |
157 |
< |
*/ |
158 |
< |
private transient final PaddedAtomicReference<Node<E>> cleanMe; |
526 |
> |
private boolean casHead(Node cmp, Node val) { |
527 |
> |
return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val); |
528 |
> |
} |
529 |
|
|
530 |
< |
/** |
531 |
< |
* Tries to cas nh as new head; if successful, unlink |
532 |
< |
* old head's next node to avoid garbage retention. |
530 |
> |
private boolean casSweepVotes(int cmp, int val) { |
531 |
> |
return UNSAFE.compareAndSwapInt(this, sweepVotesOffset, cmp, val); |
532 |
> |
} |
533 |
> |
|
534 |
> |
/* |
535 |
> |
* Possible values for "how" argument in xfer method. |
536 |
|
*/ |
537 |
< |
private boolean advanceHead(Node<E> h, Node<E> nh) { |
538 |
< |
if (h == head.get() && head.compareAndSet(h, nh)) { |
539 |
< |
h.clearNext(); // forget old next |
540 |
< |
return true; |
541 |
< |
} |
542 |
< |
return false; |
537 |
> |
private static final int NOW = 0; // for untimed poll, tryTransfer |
538 |
> |
private static final int ASYNC = 1; // for offer, put, add |
539 |
> |
private static final int SYNC = 2; // for transfer, take |
540 |
> |
private static final int TIMED = 3; // for timed poll, tryTransfer |
541 |
> |
|
542 |
> |
@SuppressWarnings("unchecked") |
543 |
> |
static <E> E cast(Object item) { |
544 |
> |
assert item == null || item.getClass() != Node.class; |
545 |
> |
return (E) item; |
546 |
|
} |
547 |
|
|
548 |
|
/** |
549 |
< |
* Puts or takes an item. Used for most queue operations (except |
550 |
< |
* poll() and tryTransfer()). See the similar code in |
551 |
< |
* SynchronousQueue for detailed explanation. |
552 |
< |
* |
553 |
< |
* @param e the item or if null, signifies that this is a take |
554 |
< |
* @param mode the wait mode: NOWAIT, TIMEOUT, WAIT |
555 |
< |
* @param nanos timeout in nanosecs, used only if mode is TIMEOUT |
556 |
< |
* @return an item, or null on failure |
557 |
< |
*/ |
558 |
< |
private E xfer(E e, int mode, long nanos) { |
559 |
< |
boolean isData = (e != null); |
560 |
< |
Node<E> s = null; |
561 |
< |
final PaddedAtomicReference<Node<E>> head = this.head; |
186 |
< |
final PaddedAtomicReference<Node<E>> tail = this.tail; |
549 |
> |
* Implements all queuing methods. See above for explanation. |
550 |
> |
* |
551 |
> |
* @param e the item or null for take |
552 |
> |
* @param haveData true if this is a put, else a take |
553 |
> |
* @param how NOW, ASYNC, SYNC, or TIMED |
554 |
> |
* @param nanos timeout in nanosecs, used only if mode is TIMED |
555 |
> |
* @return an item if matched, else e |
556 |
> |
* @throws NullPointerException if haveData mode but e is null |
557 |
> |
*/ |
558 |
> |
private E xfer(E e, boolean haveData, int how, long nanos) { |
559 |
> |
if (haveData && (e == null)) |
560 |
> |
throw new NullPointerException(); |
561 |
> |
Node s = null; // the node to append, if needed |
562 |
|
|
563 |
< |
for (;;) { |
189 |
< |
Node<E> t = tail.get(); |
190 |
< |
Node<E> h = head.get(); |
563 |
> |
retry: for (;;) { // restart on append race |
564 |
|
|
565 |
< |
if (t != null && (t == h || t.isData == isData)) { |
566 |
< |
if (s == null) |
567 |
< |
s = new Node<E>(e, isData); |
568 |
< |
Node<E> last = t.next; |
569 |
< |
if (last != null) { |
570 |
< |
if (t == tail.get()) |
571 |
< |
tail.compareAndSet(t, last); |
572 |
< |
} |
573 |
< |
else if (t.casNext(null, s)) { |
574 |
< |
tail.compareAndSet(t, s); |
575 |
< |
return awaitFulfill(t, s, e, mode, nanos); |
565 |
> |
for (Node h = head, p = h; p != null;) { // find & match first node |
566 |
> |
boolean isData = p.isData; |
567 |
> |
Object item = p.item; |
568 |
> |
if (item != p && (item != null) == isData) { // unmatched |
569 |
> |
if (isData == haveData) // can't match |
570 |
> |
break; |
571 |
> |
if (p.casItem(item, e)) { // match |
572 |
> |
for (Node q = p; q != h;) { |
573 |
> |
Node n = q.next; // update by 2 unless singleton |
574 |
> |
if (head == h && casHead(h, (n == null) ? q : n)) { |
575 |
> |
h.forgetNext(); |
576 |
> |
break; |
577 |
> |
} // advance and retry |
578 |
> |
if ((h = head) == null || |
579 |
> |
(q = h.next) == null || !q.isMatched()) |
580 |
> |
break; // unless slack < 2 |
581 |
> |
} |
582 |
> |
LockSupport.unpark(p.waiter); |
583 |
> |
return this.<E>cast(item); |
584 |
> |
} |
585 |
|
} |
586 |
+ |
Node n = p.next; |
587 |
+ |
p = (p != n) ? n : (h = head); // Use head if p offlist |
588 |
|
} |
589 |
|
|
590 |
< |
else if (h != null) { |
591 |
< |
Node<E> first = h.next; |
592 |
< |
if (t == tail.get() && first != null && |
593 |
< |
advanceHead(h, first)) { |
594 |
< |
Object x = first.get(); |
595 |
< |
if (x != first && first.compareAndSet(x, e)) { |
596 |
< |
LockSupport.unpark(first.waiter); |
597 |
< |
return isData ? e : (E) x; |
214 |
< |
} |
215 |
< |
} |
590 |
> |
if (how != NOW) { // No matches available |
591 |
> |
if (s == null) |
592 |
> |
s = new Node(e, haveData); |
593 |
> |
Node pred = tryAppend(s, haveData); |
594 |
> |
if (pred == null) |
595 |
> |
continue retry; // lost race vs opposite mode |
596 |
> |
if (how != ASYNC) |
597 |
> |
return awaitMatch(s, pred, e, (how == TIMED), nanos); |
598 |
|
} |
599 |
+ |
return e; // not waiting |
600 |
|
} |
601 |
|
} |
602 |
|
|
220 |
– |
|
603 |
|
/** |
604 |
< |
* Version of xfer for poll() and tryTransfer, which |
605 |
< |
* simplifies control paths both here and in xfer. |
606 |
< |
*/ |
607 |
< |
private E fulfill(E e) { |
608 |
< |
boolean isData = (e != null); |
609 |
< |
final PaddedAtomicReference<Node<E>> head = this.head; |
610 |
< |
final PaddedAtomicReference<Node<E>> tail = this.tail; |
611 |
< |
|
612 |
< |
for (;;) { |
613 |
< |
Node<E> t = tail.get(); |
614 |
< |
Node<E> h = head.get(); |
615 |
< |
|
616 |
< |
if (t != null && (t == h || t.isData == isData)) { |
617 |
< |
Node<E> last = t.next; |
618 |
< |
if (t == tail.get()) { |
619 |
< |
if (last != null) |
620 |
< |
tail.compareAndSet(t, last); |
621 |
< |
else |
622 |
< |
return null; |
623 |
< |
} |
624 |
< |
} |
625 |
< |
else if (h != null) { |
626 |
< |
Node<E> first = h.next; |
627 |
< |
if (t == tail.get() && |
628 |
< |
first != null && |
629 |
< |
advanceHead(h, first)) { |
630 |
< |
Object x = first.get(); |
631 |
< |
if (x != first && first.compareAndSet(x, e)) { |
250 |
< |
LockSupport.unpark(first.waiter); |
251 |
< |
return isData ? e : (E) x; |
252 |
< |
} |
604 |
> |
* Tries to append node s as tail. |
605 |
> |
* |
606 |
> |
* @param s the node to append |
607 |
> |
* @param haveData true if appending in data mode |
608 |
> |
* @return null on failure due to losing race with append in |
609 |
> |
* different mode, else s's predecessor, or s itself if no |
610 |
> |
* predecessor |
611 |
> |
*/ |
612 |
> |
private Node tryAppend(Node s, boolean haveData) { |
613 |
> |
for (Node t = tail, p = t;;) { // move p to last node and append |
614 |
> |
Node n, u; // temps for reads of next & tail |
615 |
> |
if (p == null && (p = head) == null) { |
616 |
> |
if (casHead(null, s)) |
617 |
> |
return s; // initialize |
618 |
> |
} |
619 |
> |
else if (p.cannotPrecede(haveData)) |
620 |
> |
return null; // lost race vs opposite mode |
621 |
> |
else if ((n = p.next) != null) // not last; keep traversing |
622 |
> |
p = p != t && t != (u = tail) ? (t = u) : // stale tail |
623 |
> |
(p != n) ? n : null; // restart if off list |
624 |
> |
else if (!p.casNext(null, s)) |
625 |
> |
p = p.next; // re-read on CAS failure |
626 |
> |
else { |
627 |
> |
if (p != t) { // update if slack now >= 2 |
628 |
> |
while ((tail != t || !casTail(t, s)) && |
629 |
> |
(t = tail) != null && |
630 |
> |
(s = t.next) != null && // advance and retry |
631 |
> |
(s = s.next) != null && s != t); |
632 |
|
} |
633 |
+ |
return p; |
634 |
|
} |
635 |
|
} |
636 |
|
} |
637 |
|
|
638 |
|
/** |
639 |
< |
* Spins/blocks until node s is fulfilled or caller gives up, |
260 |
< |
* depending on wait mode. |
639 |
> |
* Spins/yields/blocks until node s is matched or caller gives up. |
640 |
|
* |
262 |
– |
* @param pred the predecessor of waiting node |
641 |
|
* @param s the waiting node |
642 |
+ |
* @param pred the predecessor of s, or s itself if it has no |
643 |
+ |
* predecessor, or null if unknown (the null case does not occur |
644 |
+ |
* in any current calls but may in possible future extensions) |
645 |
|
* @param e the comparison value for checking match |
646 |
< |
* @param mode mode |
647 |
< |
* @param nanos timeout value |
648 |
< |
* @return matched item, or s if cancelled |
649 |
< |
*/ |
650 |
< |
private E awaitFulfill(Node<E> pred, Node<E> s, E e, |
651 |
< |
int mode, long nanos) { |
271 |
< |
if (mode == NOWAIT) |
272 |
< |
return null; |
273 |
< |
|
274 |
< |
long lastTime = (mode == TIMEOUT) ? System.nanoTime() : 0; |
646 |
> |
* @param timed if true, wait only until timeout elapses |
647 |
> |
* @param nanos timeout in nanosecs, used only if timed is true |
648 |
> |
* @return matched item, or e if unmatched on interrupt or timeout |
649 |
> |
*/ |
650 |
> |
private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) { |
651 |
> |
long lastTime = timed ? System.nanoTime() : 0L; |
652 |
|
Thread w = Thread.currentThread(); |
653 |
< |
int spins = -1; // set to desired spin count below |
653 |
> |
int spins = -1; // initialized after first item and cancel checks |
654 |
> |
ThreadLocalRandom randomYields = null; // bound if needed |
655 |
> |
|
656 |
|
for (;;) { |
657 |
< |
if (w.isInterrupted()) |
658 |
< |
s.compareAndSet(e, s); |
659 |
< |
Object x = s.get(); |
660 |
< |
if (x != e) { // Node was matched or cancelled |
661 |
< |
advanceHead(pred, s); // unlink if head |
662 |
< |
if (x == s) { // was cancelled |
663 |
< |
clean(pred, s); |
664 |
< |
return null; |
665 |
< |
} |
666 |
< |
else if (x != null) { |
667 |
< |
s.set(s); // avoid garbage retention |
668 |
< |
return (E) x; |
669 |
< |
} |
670 |
< |
else |
671 |
< |
return e; |
657 |
> |
Object item = s.item; |
658 |
> |
if (item != e) { // matched |
659 |
> |
assert item != s; |
660 |
> |
s.forgetContents(); // avoid garbage |
661 |
> |
return this.<E>cast(item); |
662 |
> |
} |
663 |
> |
if ((w.isInterrupted() || (timed && nanos <= 0)) && |
664 |
> |
s.casItem(e, s)) { // cancel |
665 |
> |
unsplice(pred, s); |
666 |
> |
return e; |
667 |
> |
} |
668 |
> |
|
669 |
> |
if (spins < 0) { // establish spins at/near front |
670 |
> |
if ((spins = spinsFor(pred, s.isData)) > 0) |
671 |
> |
randomYields = ThreadLocalRandom.current(); |
672 |
> |
} |
673 |
> |
else if (spins > 0) { // spin |
674 |
> |
--spins; |
675 |
> |
if (randomYields.nextInt(CHAINED_SPINS) == 0) |
676 |
> |
Thread.yield(); // occasionally yield |
677 |
|
} |
678 |
< |
if (mode == TIMEOUT) { |
678 |
> |
else if (s.waiter == null) { |
679 |
> |
s.waiter = w; // request unpark then recheck |
680 |
> |
} |
681 |
> |
else if (timed) { |
682 |
|
long now = System.nanoTime(); |
683 |
< |
nanos -= now - lastTime; |
683 |
> |
if ((nanos -= now - lastTime) > 0) |
684 |
> |
LockSupport.parkNanos(this, nanos); |
685 |
|
lastTime = now; |
298 |
– |
if (nanos <= 0) { |
299 |
– |
s.compareAndSet(e, s); // try to cancel |
300 |
– |
continue; |
301 |
– |
} |
686 |
|
} |
687 |
< |
if (spins < 0) { |
304 |
< |
Node<E> h = head.get(); // only spin if at head |
305 |
< |
spins = ((h != null && h.next == s) ? |
306 |
< |
((mode == TIMEOUT) ? |
307 |
< |
maxTimedSpins : maxUntimedSpins) : 0); |
308 |
< |
} |
309 |
< |
if (spins > 0) |
310 |
< |
--spins; |
311 |
< |
else if (s.waiter == null) |
312 |
< |
s.waiter = w; |
313 |
< |
else if (mode != TIMEOUT) { |
687 |
> |
else { |
688 |
|
LockSupport.park(this); |
315 |
– |
s.waiter = null; |
316 |
– |
spins = -1; |
317 |
– |
} |
318 |
– |
else if (nanos > spinForTimeoutThreshold) { |
319 |
– |
LockSupport.parkNanos(this, nanos); |
320 |
– |
s.waiter = null; |
321 |
– |
spins = -1; |
689 |
|
} |
690 |
|
} |
691 |
|
} |
692 |
|
|
693 |
|
/** |
694 |
< |
* Returns validated tail for use in cleaning methods. |
694 |
> |
* Returns spin/yield value for a node with given predecessor and |
695 |
> |
* data mode. See above for explanation. |
696 |
|
*/ |
697 |
< |
private Node<E> getValidatedTail() { |
698 |
< |
for (;;) { |
699 |
< |
Node<E> h = head.get(); |
700 |
< |
Node<E> first = h.next; |
701 |
< |
if (first != null && first.next == first) { // help advance |
702 |
< |
advanceHead(h, first); |
703 |
< |
continue; |
704 |
< |
} |
705 |
< |
Node<E> t = tail.get(); |
706 |
< |
Node<E> last = t.next; |
707 |
< |
if (t == tail.get()) { |
708 |
< |
if (last != null) |
709 |
< |
tail.compareAndSet(t, last); // help advance |
710 |
< |
else |
711 |
< |
return t; |
697 |
> |
private static int spinsFor(Node pred, boolean haveData) { |
698 |
> |
if (MP && pred != null) { |
699 |
> |
if (pred.isData != haveData) // phase change |
700 |
> |
return FRONT_SPINS + CHAINED_SPINS; |
701 |
> |
if (pred.isMatched()) // probably at front |
702 |
> |
return FRONT_SPINS; |
703 |
> |
if (pred.waiter == null) // pred apparently spinning |
704 |
> |
return CHAINED_SPINS; |
705 |
> |
} |
706 |
> |
return 0; |
707 |
> |
} |
708 |
> |
|
709 |
> |
/* -------------- Traversal methods -------------- */ |
710 |
> |
|
711 |
> |
/** |
712 |
> |
* Returns the successor of p, or the head node if p.next has been |
713 |
> |
* linked to self, which will only be true if traversing with a |
714 |
> |
* stale pointer that is now off the list. |
715 |
> |
*/ |
716 |
> |
final Node succ(Node p) { |
717 |
> |
Node next = p.next; |
718 |
> |
return (p == next) ? head : next; |
719 |
> |
} |
720 |
> |
|
721 |
> |
/** |
722 |
> |
* Returns the first unmatched node of the given mode, or null if |
723 |
> |
* none. Used by methods isEmpty, hasWaitingConsumer. |
724 |
> |
*/ |
725 |
> |
private Node firstOfMode(boolean isData) { |
726 |
> |
for (Node p = head; p != null; p = succ(p)) { |
727 |
> |
if (!p.isMatched()) |
728 |
> |
return (p.isData == isData) ? p : null; |
729 |
> |
} |
730 |
> |
return null; |
731 |
> |
} |
732 |
> |
|
733 |
> |
/** |
734 |
> |
* Returns the item in the first unmatched node with isData; or |
735 |
> |
* null if none. Used by peek. |
736 |
> |
*/ |
737 |
> |
private E firstDataItem() { |
738 |
> |
for (Node p = head; p != null; p = succ(p)) { |
739 |
> |
Object item = p.item; |
740 |
> |
if (p.isData) { |
741 |
> |
if (item != null && item != p) |
742 |
> |
return this.<E>cast(item); |
743 |
|
} |
744 |
+ |
else if (item == null) |
745 |
+ |
return null; |
746 |
|
} |
747 |
+ |
return null; |
748 |
|
} |
749 |
|
|
750 |
|
/** |
751 |
< |
* Gets rid of cancelled node s with original predecessor pred. |
752 |
< |
* |
351 |
< |
* @param pred predecessor of cancelled node |
352 |
< |
* @param s the cancelled node |
751 |
> |
* Traverses and counts unmatched nodes of the given mode. |
752 |
> |
* Used by methods size and getWaitingConsumerCount. |
753 |
|
*/ |
754 |
< |
private void clean(Node<E> pred, Node<E> s) { |
755 |
< |
Thread w = s.waiter; |
756 |
< |
if (w != null) { // Wake up thread |
757 |
< |
s.waiter = null; |
758 |
< |
if (w != Thread.currentThread()) |
759 |
< |
LockSupport.unpark(w); |
754 |
> |
private int countOfMode(boolean data) { |
755 |
> |
int count = 0; |
756 |
> |
for (Node p = head; p != null; ) { |
757 |
> |
if (!p.isMatched()) { |
758 |
> |
if (p.isData != data) |
759 |
> |
return 0; |
760 |
> |
if (++count == Integer.MAX_VALUE) // saturated |
761 |
> |
break; |
762 |
> |
} |
763 |
> |
Node n = p.next; |
764 |
> |
if (n != p) |
765 |
> |
p = n; |
766 |
> |
else { |
767 |
> |
count = 0; |
768 |
> |
p = head; |
769 |
> |
} |
770 |
|
} |
771 |
+ |
return count; |
772 |
+ |
} |
773 |
|
|
774 |
< |
if (pred == null) |
775 |
< |
return; |
774 |
> |
final class Itr implements Iterator<E> { |
775 |
> |
private Node nextNode; // next node to return item for |
776 |
> |
private E nextItem; // the corresponding item |
777 |
> |
private Node lastRet; // last returned node, to support remove |
778 |
> |
private Node lastPred; // predecessor to unlink lastRet |
779 |
|
|
780 |
< |
/* |
781 |
< |
* At any given time, exactly one node on list cannot be |
367 |
< |
* deleted -- the last inserted node. To accommodate this, if |
368 |
< |
* we cannot delete s, we save its predecessor as "cleanMe", |
369 |
< |
* processing the previously saved version first. At least one |
370 |
< |
* of node s or the node previously saved can always be |
371 |
< |
* processed, so this always terminates. |
780 |
> |
/** |
781 |
> |
* Moves to next node after prev, or first node if prev null. |
782 |
|
*/ |
783 |
< |
while (pred.next == s) { |
784 |
< |
Node<E> oldpred = reclean(); // First, help get rid of cleanMe |
785 |
< |
Node<E> t = getValidatedTail(); |
786 |
< |
if (s != t) { // If not tail, try to unsplice |
787 |
< |
Node<E> sn = s.next; // s.next == s means s already off list |
788 |
< |
if (sn == s || pred.casNext(s, sn)) |
783 |
> |
private void advance(Node prev) { |
784 |
> |
lastPred = lastRet; |
785 |
> |
lastRet = prev; |
786 |
> |
for (Node p = (prev == null) ? head : succ(prev); |
787 |
> |
p != null; p = succ(p)) { |
788 |
> |
Object item = p.item; |
789 |
> |
if (p.isData) { |
790 |
> |
if (item != null && item != p) { |
791 |
> |
nextItem = LinkedTransferQueue.this.<E>cast(item); |
792 |
> |
nextNode = p; |
793 |
> |
return; |
794 |
> |
} |
795 |
> |
} |
796 |
> |
else if (item == null) |
797 |
|
break; |
798 |
|
} |
799 |
< |
else if (oldpred == pred || // Already saved |
800 |
< |
(oldpred == null && cleanMe.compareAndSet(null, pred))) |
801 |
< |
break; // Postpone cleaning |
799 |
> |
nextNode = null; |
800 |
> |
} |
801 |
> |
|
802 |
> |
Itr() { |
803 |
> |
advance(null); |
804 |
> |
} |
805 |
> |
|
806 |
> |
public final boolean hasNext() { |
807 |
> |
return nextNode != null; |
808 |
> |
} |
809 |
> |
|
810 |
> |
public final E next() { |
811 |
> |
Node p = nextNode; |
812 |
> |
if (p == null) throw new NoSuchElementException(); |
813 |
> |
E e = nextItem; |
814 |
> |
advance(p); |
815 |
> |
return e; |
816 |
> |
} |
817 |
> |
|
818 |
> |
public final void remove() { |
819 |
> |
Node p = lastRet; |
820 |
> |
if (p == null) throw new IllegalStateException(); |
821 |
> |
if (p.tryMatchData()) |
822 |
> |
unsplice(lastPred, p); |
823 |
|
} |
824 |
|
} |
825 |
|
|
826 |
+ |
/* -------------- Removal methods -------------- */ |
827 |
+ |
|
828 |
|
/** |
829 |
< |
* Tries to unsplice the cancelled node held in cleanMe that was |
830 |
< |
* previously uncleanable because it was at tail. |
829 |
> |
* Unsplices (now or later) the given deleted/cancelled node with |
830 |
> |
* the given predecessor. |
831 |
|
* |
832 |
< |
* @return current cleanMe node (or null) |
832 |
> |
* @param pred a node that was at one time known to be the |
833 |
> |
* predecessor of s, or null or s itself if s is/was at head |
834 |
> |
* @param s the node to be unspliced |
835 |
|
*/ |
836 |
< |
private Node<E> reclean() { |
836 |
> |
final void unsplice(Node pred, Node s) { |
837 |
> |
s.forgetContents(); // forget unneeded fields |
838 |
|
/* |
839 |
< |
* cleanMe is, or at one time was, predecessor of cancelled |
840 |
< |
* node s that was the tail so could not be unspliced. If s |
841 |
< |
* is no longer the tail, try to unsplice if necessary and |
842 |
< |
* make cleanMe slot available. This differs from similar |
843 |
< |
* code in clean() because we must check that pred still |
400 |
< |
* points to a cancelled node that must be unspliced -- if |
401 |
< |
* not, we can (must) clear cleanMe without unsplicing. |
402 |
< |
* This can loop only due to contention on casNext or |
403 |
< |
* clearing cleanMe. |
839 |
> |
* See above for rationale. Briefly: if pred still points to |
840 |
> |
* s, try to unlink s. If s cannot be unlinked, because it is |
841 |
> |
* trailing node or pred might be unlinked, and neither pred |
842 |
> |
* nor s are head or offlist, add to sweepVotes, and if enough |
843 |
> |
* votes have accumulated, sweep. |
844 |
|
*/ |
845 |
< |
Node<E> pred; |
846 |
< |
while ((pred = cleanMe.get()) != null) { |
847 |
< |
Node<E> t = getValidatedTail(); |
848 |
< |
Node<E> s = pred.next; |
849 |
< |
if (s != t) { |
850 |
< |
Node<E> sn; |
851 |
< |
if (s == null || s == pred || s.get() != s || |
852 |
< |
(sn = s.next) == s || pred.casNext(s, sn)) |
853 |
< |
cleanMe.compareAndSet(pred, null); |
845 |
> |
if (pred != null && pred != s && pred.next == s) { |
846 |
> |
Node n = s.next; |
847 |
> |
if (n == null || |
848 |
> |
(n != s && pred.casNext(s, n) && pred.isMatched())) { |
849 |
> |
for (;;) { // check if at, or could be, head |
850 |
> |
Node h = head; |
851 |
> |
if (h == pred || h == s || h == null) |
852 |
> |
return; // at head or list empty |
853 |
> |
if (!h.isMatched()) |
854 |
> |
break; |
855 |
> |
Node hn = h.next; |
856 |
> |
if (hn == null) |
857 |
> |
return; // now empty |
858 |
> |
if (hn != h && casHead(h, hn)) |
859 |
> |
h.forgetNext(); // advance head |
860 |
> |
} |
861 |
> |
if (pred.next != pred && s.next != s) { // recheck if offlist |
862 |
> |
for (;;) { // sweep now if enough votes |
863 |
> |
int v = sweepVotes; |
864 |
> |
if (v < SWEEP_THRESHOLD) { |
865 |
> |
if (casSweepVotes(v, v + 1)) |
866 |
> |
break; |
867 |
> |
} |
868 |
> |
else if (casSweepVotes(v, 0)) { |
869 |
> |
sweep(); |
870 |
> |
break; |
871 |
> |
} |
872 |
> |
} |
873 |
> |
} |
874 |
|
} |
415 |
– |
else // s is still tail; cannot clean |
416 |
– |
break; |
875 |
|
} |
418 |
– |
return pred; |
876 |
|
} |
877 |
|
|
878 |
|
/** |
879 |
+ |
* Unlinks matched nodes encountered in a traversal from head. |
880 |
+ |
*/ |
881 |
+ |
private void sweep() { |
882 |
+ |
Node p = head, s, n; |
883 |
+ |
while (p != null && (s = p.next) != null && (n = s.next) != null) { |
884 |
+ |
if (p == s || s == n) |
885 |
+ |
p = head; // stale |
886 |
+ |
else if (s.isMatched()) |
887 |
+ |
p.casNext(s, n); |
888 |
+ |
else |
889 |
+ |
p = s; |
890 |
+ |
} |
891 |
+ |
} |
892 |
+ |
|
893 |
+ |
/** |
894 |
+ |
* Main implementation of remove(Object) |
895 |
+ |
*/ |
896 |
+ |
private boolean findAndRemove(Object e) { |
897 |
+ |
if (e != null) { |
898 |
+ |
for (Node pred = null, p = head; p != null; ) { |
899 |
+ |
Object item = p.item; |
900 |
+ |
if (p.isData) { |
901 |
+ |
if (item != null && item != p && e.equals(item) && |
902 |
+ |
p.tryMatchData()) { |
903 |
+ |
unsplice(pred, p); |
904 |
+ |
return true; |
905 |
+ |
} |
906 |
+ |
} |
907 |
+ |
else if (item == null) |
908 |
+ |
break; |
909 |
+ |
pred = p; |
910 |
+ |
if ((p = p.next) == pred) { // stale |
911 |
+ |
pred = null; |
912 |
+ |
p = head; |
913 |
+ |
} |
914 |
+ |
} |
915 |
+ |
} |
916 |
+ |
return false; |
917 |
+ |
} |
918 |
+ |
|
919 |
+ |
|
920 |
+ |
/** |
921 |
|
* Creates an initially empty {@code LinkedTransferQueue}. |
922 |
|
*/ |
923 |
|
public LinkedTransferQueue() { |
425 |
– |
Node<E> dummy = new Node<E>(null, false); |
426 |
– |
head = new PaddedAtomicReference<Node<E>>(dummy); |
427 |
– |
tail = new PaddedAtomicReference<Node<E>>(dummy); |
428 |
– |
cleanMe = new PaddedAtomicReference<Node<E>>(null); |
924 |
|
} |
925 |
|
|
926 |
|
/** |
938 |
|
} |
939 |
|
|
940 |
|
/** |
941 |
< |
* @throws InterruptedException {@inheritDoc} |
942 |
< |
* @throws NullPointerException {@inheritDoc} |
941 |
> |
* Inserts the specified element at the tail of this queue. |
942 |
> |
* As the queue is unbounded, this method will never block. |
943 |
> |
* |
944 |
> |
* @throws NullPointerException if the specified element is null |
945 |
|
*/ |
946 |
< |
public void put(E e) throws InterruptedException { |
947 |
< |
if (e == null) throw new NullPointerException(); |
451 |
< |
if (Thread.interrupted()) throw new InterruptedException(); |
452 |
< |
xfer(e, NOWAIT, 0); |
946 |
> |
public void put(E e) { |
947 |
> |
xfer(e, true, ASYNC, 0); |
948 |
|
} |
949 |
|
|
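// Usage sketch: because the queue is unbounded, insertion never
// blocks, while take blocks until an element is available:
//
//     LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
//     q.put("x");            // returns immediately
//     String s = q.take();   // waits for an element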
950 |
|
/** |
951 |
< |
* @throws InterruptedException {@inheritDoc} |
952 |
< |
* @throws NullPointerException {@inheritDoc} |
951 |
> |
* Inserts the specified element at the tail of this queue. |
952 |
> |
* As the queue is unbounded, this method will never block or |
953 |
> |
* return {@code false}. |
954 |
> |
* |
955 |
> |
* @return {@code true} (as specified by |
956 |
> |
* {@link BlockingQueue#offer(Object,long,TimeUnit) BlockingQueue.offer}) |
957 |
> |
* @throws NullPointerException if the specified element is null |
958 |
|
*/ |
959 |
< |
public boolean offer(E e, long timeout, TimeUnit unit) |
960 |
< |
throws InterruptedException { |
461 |
< |
if (e == null) throw new NullPointerException(); |
462 |
< |
if (Thread.interrupted()) throw new InterruptedException(); |
463 |
< |
xfer(e, NOWAIT, 0); |
959 |
> |
public boolean offer(E e, long timeout, TimeUnit unit) { |
960 |
> |
xfer(e, true, ASYNC, 0); |
961 |
|
return true; |
962 |
|
} |
963 |
|
|
964 |
|
/** |
965 |
< |
* @throws NullPointerException {@inheritDoc} |
965 |
> |
* Inserts the specified element at the tail of this queue. |
966 |
> |
* As the queue is unbounded, this method will never return {@code false}. |
967 |
> |
* |
968 |
> |
* @return {@code true} (as specified by |
969 |
> |
* {@link BlockingQueue#offer(Object) BlockingQueue.offer}) |
970 |
> |
* @throws NullPointerException if the specified element is null |
971 |
|
*/ |
972 |
|
public boolean offer(E e) { |
973 |
< |
if (e == null) throw new NullPointerException(); |
472 |
< |
xfer(e, NOWAIT, 0); |
973 |
> |
xfer(e, true, ASYNC, 0); |
974 |
|
return true; |
975 |
|
} |
976 |
|
|
977 |
|
/** |
978 |
< |
* @throws NullPointerException {@inheritDoc} |
978 |
> |
* Inserts the specified element at the tail of this queue. |
979 |
> |
* As the queue is unbounded, this method will never throw |
980 |
> |
* {@link IllegalStateException} or return {@code false}. |
981 |
> |
* |
982 |
> |
* @return {@code true} (as specified by {@link Collection#add}) |
983 |
> |
* @throws NullPointerException if the specified element is null |
984 |
|
*/ |
985 |
|
public boolean add(E e) { |
986 |
< |
if (e == null) throw new NullPointerException(); |
481 |
< |
xfer(e, NOWAIT, 0); |
986 |
> |
xfer(e, true, ASYNC, 0); |
987 |
|
return true; |
988 |
|
} |
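Since the queue is unbounded, put, both offer forms, and add above all
reduce to the same asynchronous enqueue (xfer in ASYNC mode): none of
them can block, time out, or fail. A usage sketch against the patched
API (the demo class name is invented for illustration):

    import java.util.concurrent.LinkedTransferQueue;
    import java.util.concurrent.TimeUnit;

    class AsyncInsertDemo {
        public static void main(String[] args) {
            LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
            q.put("a");                                     // never blocks
            boolean b1 = q.offer("b");                      // always true
            boolean b2 = q.offer("c", 1, TimeUnit.SECONDS); // timeout is ignored
            boolean b3 = q.add("d");                        // never throws IllegalStateException
            System.out.println(q.size() + " " + b1 + " " + b2 + " " + b3);
            // prints: 4 true true true
        }
    }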
989 |
|
|
990 |
|
/** |
991 |
< |
* @throws InterruptedException {@inheritDoc} |
992 |
< |
* @throws NullPointerException {@inheritDoc} |
991 |
> |
* Transfers the element to a waiting consumer immediately, if possible. |
992 |
> |
* |
993 |
> |
* <p>More precisely, transfers the specified element immediately |
994 |
> |
* if there exists a consumer already waiting to receive it (in |
995 |
> |
* {@link #take} or timed {@link #poll(long,TimeUnit) poll}), |
996 |
> |
* otherwise returning {@code false} without enqueuing the element. |
997 |
> |
* |
998 |
> |
* @throws NullPointerException if the specified element is null |
999 |
> |
*/ |
1000 |
> |
public boolean tryTransfer(E e) { |
1001 |
> |
return xfer(e, true, NOW, 0) == null; |
1002 |
> |
} |
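In NOW mode the element changes hands only if some consumer is already
blocked in take or timed poll; otherwise nothing is enqueued and the
caller learns of the failure immediately. A sketch (demo class name
invented):

    import java.util.concurrent.LinkedTransferQueue;

    class TryTransferDemo {
        public static void main(String[] args) {
            LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
            boolean handedOff = q.tryTransfer("x"); // no waiting consumer
            System.out.println(handedOff + " " + q.isEmpty());
            // prints: false true  ("x" was never enqueued)
        }
    }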
1003 |
> |
|
1004 |
> |
/** |
1005 |
> |
* Transfers the element to a consumer, waiting if necessary to do so. |
1006 |
> |
* |
1007 |
> |
* <p>More precisely, transfers the specified element immediately |
1008 |
> |
* if there exists a consumer already waiting to receive it (in |
1009 |
> |
* {@link #take} or timed {@link #poll(long,TimeUnit) poll}), |
1010 |
> |
* else inserts the specified element at the tail of this queue |
1011 |
> |
* and waits until the element is received by a consumer. |
1012 |
> |
* |
1013 |
> |
* @throws NullPointerException if the specified element is null |
1014 |
|
*/ |
1015 |
|
public void transfer(E e) throws InterruptedException { |
1016 |
< |
if (e == null) throw new NullPointerException(); |
1017 |
< |
if (xfer(e, WAIT, 0) == null) { |
492 |
< |
Thread.interrupted(); |
1016 |
> |
if (xfer(e, true, SYNC, 0) != null) { |
1017 |
> |
Thread.interrupted(); // failure possible only due to interrupt |
1018 |
|
throw new InterruptedException(); |
1019 |
|
} |
1020 |
|
} |
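In SYNC mode the producer enqueues the element and then blocks until a
consumer receives it, giving synchronous hand-off on an otherwise
unbounded queue. A sketch with a hypothetical consumer thread:

    import java.util.concurrent.LinkedTransferQueue;

    class TransferDemo {
        public static void main(String[] args) throws InterruptedException {
            final LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
            Thread consumer = new Thread(new Runnable() {
                public void run() {
                    try {
                        System.out.println("got: " + q.take());
                    } catch (InterruptedException ignored) {}
                }
            });
            consumer.start();
            q.transfer("payload"); // returns only after the take() above
            consumer.join();
        }
    }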
1021 |
|
|
1022 |
|
/** |
1023 |
< |
* @throws InterruptedException {@inheritDoc} |
1024 |
< |
* @throws NullPointerException {@inheritDoc} |
1023 |
> |
* Transfers the element to a consumer if it is possible to do so |
1024 |
> |
* before the timeout elapses. |
1025 |
> |
* |
1026 |
> |
* <p>More precisely, transfers the specified element immediately |
1027 |
> |
* if there exists a consumer already waiting to receive it (in |
1028 |
> |
* {@link #take} or timed {@link #poll(long,TimeUnit) poll}), |
1029 |
> |
* else inserts the specified element at the tail of this queue |
1030 |
> |
* and waits until the element is received by a consumer, |
1031 |
> |
* returning {@code false} if the specified wait time elapses |
1032 |
> |
* before the element can be transferred. |
1033 |
> |
* |
1034 |
> |
* @throws NullPointerException if the specified element is null |
1035 |
|
*/ |
1036 |
|
public boolean tryTransfer(E e, long timeout, TimeUnit unit) |
1037 |
|
throws InterruptedException { |
1038 |
< |
if (e == null) throw new NullPointerException(); |
504 |
< |
if (xfer(e, TIMEOUT, unit.toNanos(timeout)) != null) |
1038 |
> |
if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null) |
1039 |
|
return true; |
1040 |
|
if (!Thread.interrupted()) |
1041 |
|
return false; |
1042 |
|
throw new InterruptedException(); |
1043 |
|
} |
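TIMED mode behaves like transfer but gives up once the deadline passes;
on timeout the node is cancelled, so the element does not remain on the
queue. A sketch (the 100 ms figure is arbitrary):

    import java.util.concurrent.LinkedTransferQueue;
    import java.util.concurrent.TimeUnit;

    class TimedTransferDemo {
        public static void main(String[] args) throws InterruptedException {
            LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
            boolean ok = q.tryTransfer("x", 100, TimeUnit.MILLISECONDS);
            System.out.println(ok + " " + q.isEmpty());
            // prints: false true  (no consumer arrived within 100 ms)
        }
    }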
1044 |
|
|
511 |
– |
/** |
512 |
– |
* @throws NullPointerException {@inheritDoc} |
513 |
– |
*/ |
514 |
– |
public boolean tryTransfer(E e) { |
515 |
– |
if (e == null) throw new NullPointerException(); |
516 |
– |
return fulfill(e) != null; |
517 |
– |
} |
518 |
– |
|
519 |
– |
/** |
520 |
– |
* @throws InterruptedException {@inheritDoc} |
521 |
– |
*/ |
1045 |
|
public E take() throws InterruptedException { |
1046 |
< |
Object e = xfer(null, WAIT, 0); |
1046 |
> |
E e = xfer(null, false, SYNC, 0); |
1047 |
|
if (e != null) |
1048 |
< |
return (E) e; |
1048 |
> |
return e; |
1049 |
|
Thread.interrupted(); |
1050 |
|
throw new InterruptedException(); |
1051 |
|
} |
1052 |
|
|
530 |
– |
/** |
531 |
– |
* @throws InterruptedException {@inheritDoc} |
532 |
– |
*/ |
1053 |
|
public E poll(long timeout, TimeUnit unit) throws InterruptedException { |
1054 |
< |
Object e = xfer(null, TIMEOUT, unit.toNanos(timeout)); |
1054 |
> |
E e = xfer(null, false, TIMED, unit.toNanos(timeout)); |
1055 |
|
if (e != null || !Thread.interrupted()) |
1056 |
< |
return (E) e; |
1056 |
> |
return e; |
1057 |
|
throw new InterruptedException(); |
1058 |
|
} |
1059 |
|
|
1060 |
|
public E poll() { |
1061 |
< |
return fulfill(null); |
1061 |
> |
return xfer(null, false, NOW, 0); |
1062 |
|
} |
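The retrieval methods mirror the insertion family, passing a null item
to the same xfer core: take is SYNC (wait indefinitely), timed poll is
TIMED, and poll is NOW (return at once). A sketch:

    import java.util.concurrent.LinkedTransferQueue;
    import java.util.concurrent.TimeUnit;

    class RetrievalDemo {
        public static void main(String[] args) throws InterruptedException {
            LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
            System.out.println(q.poll());                          // null: empty, no wait
            System.out.println(q.poll(50, TimeUnit.MILLISECONDS)); // null after ~50 ms
            q.put("x");
            System.out.println(q.take());                          // "x", without blocking
        }
    }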
1063 |
|
|
1064 |
|
/** |
1065 |
< |
* @throws NullPointerException {@inheritDoc} |
1066 |
< |
* @throwsIllegalArgumentException {@inheritDoc} |
1065 |
> |
* @throws NullPointerException {@inheritDoc} |
1066 |
> |
* @throws IllegalArgumentException {@inheritDoc} |
1067 |
|
*/ |
1068 |
|
public int drainTo(Collection<? super E> c) { |
1069 |
|
if (c == null) |
1080 |
|
} |
1081 |
|
|
1082 |
|
/** |
1083 |
< |
* @throws NullPointerException {@inheritDoc} |
1084 |
< |
* @throwsIllegalArgumentException {@inheritDoc} |
1083 |
> |
* @throws NullPointerException {@inheritDoc} |
1084 |
> |
* @throws IllegalArgumentException {@inheritDoc} |
1085 |
|
*/ |
1086 |
|
public int drainTo(Collection<? super E> c, int maxElements) { |
1087 |
|
if (c == null) |
1097 |
|
return n; |
1098 |
|
} |
1099 |
|
|
580 |
– |
// Traversal-based methods |
581 |
– |
|
1100 |
|
/** |
1101 |
< |
* Returns head after performing any outstanding helping steps. |
1101 |
> |
* Returns an iterator over the elements in this queue in proper |
1102 |
> |
* sequence, from head to tail. |
1103 |
> |
* |
1104 |
> |
* <p>The returned iterator is a "weakly consistent" iterator that |
1105 |
> |
* will never throw |
1106 |
> |
* {@link ConcurrentModificationException ConcurrentModificationException}, |
1107 |
> |
* and guarantees to traverse elements as they existed upon |
1108 |
> |
* construction of the iterator, and may (but is not guaranteed |
1109 |
> |
* to) reflect any modifications subsequent to construction. |
1110 |
> |
* |
1111 |
> |
* @return an iterator over the elements in this queue in proper sequence |
1112 |
|
*/ |
585 |
– |
private Node<E> traversalHead() { |
586 |
– |
for (;;) { |
587 |
– |
Node<E> t = tail.get(); |
588 |
– |
Node<E> h = head.get(); |
589 |
– |
if (h != null && t != null) { |
590 |
– |
Node<E> last = t.next; |
591 |
– |
Node<E> first = h.next; |
592 |
– |
if (t == tail.get()) { |
593 |
– |
if (last != null) |
594 |
– |
tail.compareAndSet(t, last); |
595 |
– |
else if (first != null) { |
596 |
– |
Object x = first.get(); |
597 |
– |
if (x == first) |
598 |
– |
advanceHead(h, first); |
599 |
– |
else |
600 |
– |
return h; |
601 |
– |
} |
602 |
– |
else |
603 |
– |
return h; |
604 |
– |
} |
605 |
– |
} |
606 |
– |
reclean(); |
607 |
– |
} |
608 |
– |
} |
609 |
– |
|
610 |
– |
|
1113 |
|
public Iterator<E> iterator() { |
1114 |
|
return new Itr(); |
1115 |
|
} |
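A sketch of the weak-consistency contract documented above; whether the
iterator observes the concurrent add and remove below is deliberately
unspecified, but it never throws ConcurrentModificationException and is
guaranteed to report surviving elements that were present when it was
created:

    import java.util.Iterator;
    import java.util.concurrent.LinkedTransferQueue;

    class WeakIteratorDemo {
        public static void main(String[] args) {
            LinkedTransferQueue<Integer> q = new LinkedTransferQueue<Integer>();
            for (int i = 0; i < 3; i++) q.add(i);
            Iterator<Integer> it = q.iterator();
            q.add(3);    // may or may not be seen by it
            q.remove(0); // 0 may or may not still be reported
            while (it.hasNext())
                System.out.println(it.next()); // 1 and 2 are guaranteed
        }
    }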
1116 |
|
|
615 |
– |
/** |
616 |
– |
* Iterators. Basic strategy is to traverse list, treating |
617 |
– |
* non-data (i.e., request) nodes as terminating list. |
618 |
– |
* Once a valid data node is found, the item is cached |
619 |
– |
* so that the next call to next() will return it even |
620 |
– |
* if subsequently removed. |
621 |
– |
*/ |
622 |
– |
class Itr implements Iterator<E> { |
623 |
– |
Node<E> next; // node to return next |
624 |
– |
Node<E> pnext; // predecessor of next |
625 |
– |
Node<E> snext; // successor of next |
626 |
– |
Node<E> curr; // last returned node, for remove() |
627 |
– |
Node<E> pcurr; // predecessor of curr, for remove() |
628 |
– |
E nextItem; // Cache of next item, once committed to in next |
629 |
– |
|
630 |
– |
Itr() { |
631 |
– |
findNext(); |
632 |
– |
} |
633 |
– |
|
634 |
– |
/** |
635 |
– |
* Ensures next points to next valid node, or null if none. |
636 |
– |
*/ |
637 |
– |
void findNext() { |
638 |
– |
for (;;) { |
639 |
– |
Node<E> pred = pnext; |
640 |
– |
Node<E> q = next; |
641 |
– |
if (pred == null || pred == q) { |
642 |
– |
pred = traversalHead(); |
643 |
– |
q = pred.next; |
644 |
– |
} |
645 |
– |
if (q == null || !q.isData) { |
646 |
– |
next = null; |
647 |
– |
return; |
648 |
– |
} |
649 |
– |
Object x = q.get(); |
650 |
– |
Node<E> s = q.next; |
651 |
– |
if (x != null && q != x && q != s) { |
652 |
– |
nextItem = (E) x; |
653 |
– |
snext = s; |
654 |
– |
pnext = pred; |
655 |
– |
next = q; |
656 |
– |
return; |
657 |
– |
} |
658 |
– |
pnext = q; |
659 |
– |
next = s; |
660 |
– |
} |
661 |
– |
} |
662 |
– |
|
663 |
– |
public boolean hasNext() { |
664 |
– |
return next != null; |
665 |
– |
} |
666 |
– |
|
667 |
– |
public E next() { |
668 |
– |
if (next == null) throw new NoSuchElementException(); |
669 |
– |
pcurr = pnext; |
670 |
– |
curr = next; |
671 |
– |
pnext = next; |
672 |
– |
next = snext; |
673 |
– |
E x = nextItem; |
674 |
– |
findNext(); |
675 |
– |
return x; |
676 |
– |
} |
677 |
– |
|
678 |
– |
public void remove() { |
679 |
– |
Node<E> p = curr; |
680 |
– |
if (p == null) |
681 |
– |
throw new IllegalStateException(); |
682 |
– |
Object x = p.get(); |
683 |
– |
if (x != null && x != p && p.compareAndSet(x, p)) |
684 |
– |
clean(pcurr, p); |
685 |
– |
} |
686 |
– |
} |
687 |
– |
|
1117 |
|
public E peek() { |
1118 |
< |
for (;;) { |
690 |
< |
Node<E> h = traversalHead(); |
691 |
< |
Node<E> p = h.next; |
692 |
< |
if (p == null) |
693 |
< |
return null; |
694 |
< |
Object x = p.get(); |
695 |
< |
if (p != x) { |
696 |
< |
if (!p.isData) |
697 |
< |
return null; |
698 |
< |
if (x != null) |
699 |
< |
return (E) x; |
700 |
< |
} |
701 |
< |
} |
1118 |
> |
return firstDataItem(); |
1119 |
|
} |
1120 |
|
|
1121 |
+ |
/** |
1122 |
+ |
* Returns {@code true} if this queue contains no elements. |
1123 |
+ |
* |
1124 |
+ |
* @return {@code true} if this queue contains no elements |
1125 |
+ |
*/ |
1126 |
|
public boolean isEmpty() { |
1127 |
< |
for (;;) { |
706 |
< |
Node<E> h = traversalHead(); |
707 |
< |
Node<E> p = h.next; |
708 |
< |
if (p == null) |
709 |
< |
return true; |
710 |
< |
Object x = p.get(); |
711 |
< |
if (p != x) { |
712 |
< |
if (!p.isData) |
713 |
< |
return true; |
714 |
< |
if (x != null) |
715 |
< |
return false; |
716 |
< |
} |
717 |
< |
} |
1127 |
> |
return firstOfMode(true) == null; |
1128 |
|
} |
1129 |
|
|
1130 |
|
public boolean hasWaitingConsumer() { |
1131 |
< |
for (;;) { |
722 |
< |
Node<E> h = traversalHead(); |
723 |
< |
Node<E> p = h.next; |
724 |
< |
if (p == null) |
725 |
< |
return false; |
726 |
< |
Object x = p.get(); |
727 |
< |
if (p != x) |
728 |
< |
return !p.isData; |
729 |
< |
} |
1131 |
> |
return firstOfMode(false) != null; |
1132 |
|
} |
1133 |
|
|
1134 |
|
/** |
1144 |
|
* @return the number of elements in this queue |
1145 |
|
*/ |
1146 |
|
public int size() { |
1147 |
< |
int count = 0; |
746 |
< |
Node<E> h = traversalHead(); |
747 |
< |
for (Node<E> p = h.next; p != null && p.isData; p = p.next) { |
748 |
< |
Object x = p.get(); |
749 |
< |
if (x != null && x != p) { |
750 |
< |
if (++count == Integer.MAX_VALUE) // saturated |
751 |
< |
break; |
752 |
< |
} |
753 |
< |
} |
754 |
< |
return count; |
1147 |
> |
return countOfMode(true); |
1148 |
|
} |
1149 |
|
|
1150 |
|
public int getWaitingConsumerCount() { |
1151 |
< |
int count = 0; |
759 |
< |
Node<E> h = traversalHead(); |
760 |
< |
for (Node<E> p = h.next; p != null && !p.isData; p = p.next) { |
761 |
< |
if (p.get() == null) { |
762 |
< |
if (++count == Integer.MAX_VALUE) |
763 |
< |
break; |
764 |
< |
} |
765 |
< |
} |
766 |
< |
return count; |
1151 |
> |
return countOfMode(false); |
1152 |
|
} |
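hasWaitingConsumer and getWaitingConsumerCount report request-mode
nodes, i.e. consumers currently blocked waiting for an element; like
size, the count requires a full traversal via countOfMode. The answer
is inherently racy, as hedged in the comments of this sketch:

    import java.util.concurrent.LinkedTransferQueue;

    class WaitingConsumerDemo {
        public static void main(String[] args) throws InterruptedException {
            final LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
            Thread consumer = new Thread(new Runnable() {
                public void run() {
                    try { q.take(); } catch (InterruptedException ignored) {}
                }
            });
            consumer.start();
            Thread.sleep(100); // crude: give take() time to block
            System.out.println(q.hasWaitingConsumer());      // very likely true
            System.out.println(q.getWaitingConsumerCount()); // very likely 1
            q.put("done");                                   // releases the consumer
            consumer.join();
        }
    }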
1153 |
|
|
1154 |
< |
public int remainingCapacity() { |
1155 |
< |
return Integer.MAX_VALUE; |
1154 |
> |
/** |
1155 |
> |
* Removes a single instance of the specified element from this queue, |
1156 |
> |
* if it is present. More formally, removes an element {@code e} such |
1157 |
> |
* that {@code o.equals(e)}, if this queue contains one or more such |
1158 |
> |
* elements. |
1159 |
> |
* Returns {@code true} if this queue contained the specified element |
1160 |
> |
* (or equivalently, if this queue changed as a result of the call). |
1161 |
> |
* |
1162 |
> |
* @param o element to be removed from this queue, if present |
1163 |
> |
* @return {@code true} if this queue changed as a result of the call |
1164 |
> |
*/ |
1165 |
> |
public boolean remove(Object o) { |
1166 |
> |
return findAndRemove(o); |
1167 |
|
} |
1168 |
|
|
1169 |
< |
public boolean remove(Object o) { |
1170 |
< |
if (o == null) |
1171 |
< |
return false; |
1172 |
< |
for (;;) { |
1173 |
< |
Node<E> pred = traversalHead(); |
1174 |
< |
for (;;) { |
1175 |
< |
Node<E> q = pred.next; |
1176 |
< |
if (q == null || !q.isData) |
1177 |
< |
return false; |
782 |
< |
if (q == pred) // restart |
783 |
< |
break; |
784 |
< |
Object x = q.get(); |
785 |
< |
if (x != null && x != q && o.equals(x) && |
786 |
< |
q.compareAndSet(x, q)) { |
787 |
< |
clean(pred, q); |
788 |
< |
return true; |
789 |
< |
} |
790 |
< |
pred = q; |
791 |
< |
} |
792 |
< |
} |
1169 |
> |
/** |
1170 |
> |
* Always returns {@code Integer.MAX_VALUE} because a |
1171 |
> |
* {@code LinkedTransferQueue} is not capacity constrained. |
1172 |
> |
* |
1173 |
> |
* @return {@code Integer.MAX_VALUE} (as specified by |
1174 |
> |
* {@link BlockingQueue#remainingCapacity()}) |
1175 |
> |
*/ |
1176 |
> |
public int remainingCapacity() { |
1177 |
> |
return Integer.MAX_VALUE; |
1178 |
|
} |
1179 |
|
|
1180 |
|
/** |
1181 |
< |
* Save the state to a stream (that is, serialize it). |
1181 |
> |
* Saves the state to a stream (that is, serializes it). |
1182 |
|
* |
1183 |
|
* @serialData All of the elements (each an {@code E}) in |
1184 |
|
* the proper order, followed by a null |
1194 |
|
} |
1195 |
|
|
1196 |
|
/** |
1197 |
< |
* Reconstitute the Queue instance from a stream (that is, |
1198 |
< |
* deserialize it). |
1197 |
> |
* Reconstitutes the Queue instance from a stream (that is, |
1198 |
> |
* deserializes it). |
1199 |
|
* |
1200 |
|
* @param s the stream |
1201 |
|
*/ |
1202 |
|
private void readObject(java.io.ObjectInputStream s) |
1203 |
|
throws java.io.IOException, ClassNotFoundException { |
1204 |
|
s.defaultReadObject(); |
820 |
– |
resetHeadAndTail(); |
1205 |
|
for (;;) { |
1206 |
|
@SuppressWarnings("unchecked") E item = (E) s.readObject(); |
1207 |
|
if (item == null) |
1211 |
|
} |
1212 |
|
} |
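A round-trip sketch of the serialized form described above: all
elements in proper order, then a null terminator that readObject uses
to stop. The demo class is invented; any Serializable element type
works:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import java.util.concurrent.LinkedTransferQueue;

    class SerializationDemo {
        public static void main(String[] args) throws Exception {
            LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
            q.add("a");
            q.add("b");
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bos);
            out.writeObject(q); // writes "a", "b", then null
            out.close();
            ObjectInputStream in = new ObjectInputStream(
                new ByteArrayInputStream(bos.toByteArray()));
            @SuppressWarnings("unchecked")
            LinkedTransferQueue<String> copy =
                (LinkedTransferQueue<String>) in.readObject();
            System.out.println(copy.poll() + " " + copy.poll()); // a b
        }
    }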
1213 |
|
|
830 |
– |
// Support for resetting head/tail while deserializing |
831 |
– |
private void resetHeadAndTail() { |
832 |
– |
Node<E> dummy = new Node<E>(null, false); |
833 |
– |
UNSAFE.putObjectVolatile(this, headOffset, |
834 |
– |
new PaddedAtomicReference<Node<E>>(dummy)); |
835 |
– |
UNSAFE.putObjectVolatile(this, tailOffset, |
836 |
– |
new PaddedAtomicReference<Node<E>>(dummy)); |
837 |
– |
UNSAFE.putObjectVolatile(this, cleanMeOffset, |
838 |
– |
new PaddedAtomicReference<Node<E>>(null)); |
839 |
– |
} |
840 |
– |
|
1214 |
|
// Unsafe mechanics |
1215 |
|
|
1216 |
|
private static final sun.misc.Unsafe UNSAFE = getUnsafe(); |
1217 |
|
private static final long headOffset = |
1218 |
< |
objectFieldOffset("head", LinkedTransferQueue.class); |
1218 |
> |
objectFieldOffset(UNSAFE, "head", LinkedTransferQueue.class); |
1219 |
|
private static final long tailOffset = |
1220 |
< |
objectFieldOffset("tail", LinkedTransferQueue.class); |
1221 |
< |
private static final long cleanMeOffset = |
1222 |
< |
objectFieldOffset("cleanMe", LinkedTransferQueue.class); |
1220 |
> |
objectFieldOffset(UNSAFE, "tail", LinkedTransferQueue.class); |
1221 |
> |
private static final long sweepVotesOffset = |
1222 |
> |
objectFieldOffset(UNSAFE, "sweepVotes", LinkedTransferQueue.class); |
1223 |
|
|
1224 |
< |
private static long objectFieldOffset(String field, Class<?> klazz) { |
1224 |
> |
static long objectFieldOffset(sun.misc.Unsafe UNSAFE, |
1225 |
> |
String field, Class<?> klazz) { |
1226 |
|
try { |
1227 |
|
return UNSAFE.objectFieldOffset(klazz.getDeclaredField(field)); |
1228 |
|
} catch (NoSuchFieldException e) { |
1240 |
|
* |
1241 |
|
* @return a sun.misc.Unsafe |
1242 |
|
*/ |
1243 |
< |
private static sun.misc.Unsafe getUnsafe() { |
1243 |
> |
static sun.misc.Unsafe getUnsafe() { |
1244 |
|
try { |
1245 |
|
return sun.misc.Unsafe.getUnsafe(); |
1246 |
|
} catch (SecurityException se) { |
1260 |
|
} |
1261 |
|
} |
1262 |
|
} |
1263 |
+ |
|
1264 |
|
} |