   6
   7    package jsr166y;
   8
   9 -  import java.util.concurrent.*;
  10 -
   9    import java.util.AbstractQueue;
  10    import java.util.Collection;
  11    import java.util.ConcurrentModificationException;
  12    import java.util.Iterator;
  13    import java.util.NoSuchElementException;
  14    import java.util.Queue;
  15 +  import java.util.concurrent.TimeUnit;
  16    import java.util.concurrent.locks.LockSupport;
  17 +
  18    /**
  19     * An unbounded {@link TransferQueue} based on linked nodes.
  20     * This queue orders elements FIFO (first-in-first-out) with respect
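Before the annotated changes below, a brief orientation for readers new to TransferQueue semantics. The following is a minimal usage sketch, not part of the patch; it assumes the queue is available as jsr166y.LinkedTransferQueue (java.util.concurrent.LinkedTransferQueue in JDK 7 and later), and the thread structure is illustrative:

    import jsr166y.LinkedTransferQueue; // java.util.concurrent in JDK 7+

    public class TransferDemo {
        public static void main(String[] args) throws InterruptedException {
            final LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();

            Thread consumer = new Thread(new Runnable() {
                public void run() {
                    try {
                        // take() blocks until an element is available
                        System.out.println("got: " + q.take());
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                    }
                }
            });
            consumer.start();

            // Unlike put()/offer(), which enqueue and return immediately,
            // transfer() blocks until a consumer has received the element.
            q.transfer("hello");
            consumer.join();
        }
    }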
 105     * successful atomic operation per enq/deq pair. But it also
 106     * enables lower cost variants of queue maintenance mechanics. (A
 107     * variation of this idea applies even for non-dual queues that
 108 <   * support deletion of embedded elements, such as
 108 >   * support deletion of interior elements, such as
 109     * j.u.c.ConcurrentLinkedQueue.)
 110     *
 111 <   * Once a node is matched, its item can never again change. We
 112 <   * may thus arrange that the linked list of them contains a prefix
 113 <   * of zero or more matched nodes, followed by a suffix of zero or
 114 <   * more unmatched nodes. (Note that we allow both the prefix and
 115 <   * suffix to be zero length, which in turn means that we do not
 116 <   * use a dummy header.) If we were not concerned with either time
 117 <   * or space efficiency, we could correctly perform enqueue and
 118 <   * dequeue operations by traversing from a pointer to the initial
 119 <   * node; CASing the item of the first unmatched node on match and
 120 <   * CASing the next field of the trailing node on appends. While
 121 <   * this would be a terrible idea in itself, it does have the
 122 <   * benefit of not requiring ANY atomic updates on head/tail
 123 <   * fields.
 111 >   * Once a node is matched, its match status can never again
 112 >   * change. We may thus arrange that the linked list of them
 113 >   * contain a prefix of zero or more matched nodes, followed by a
 114 >   * suffix of zero or more unmatched nodes. (Note that we allow
 115 >   * both the prefix and suffix to be zero length, which in turn
 116 >   * means that we do not use a dummy header.) If we were not
 117 >   * concerned with either time or space efficiency, we could
 118 >   * correctly perform enqueue and dequeue operations by traversing
 119 >   * from a pointer to the initial node; CASing the item of the
 120 >   * first unmatched node on match and CASing the next field of the
 121 >   * trailing node on appends. (Plus some special-casing when
 122 >   * initially empty). While this would be a terrible idea in
 123 >   * itself, it does have the benefit of not requiring ANY atomic
 124 >   * updates on head/tail fields.
 125     *
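To make the matching discipline concrete, here is a minimal sketch of a dual-queue node, using AtomicReference in place of the Unsafe mechanics that appear later in this patch; SketchNode and its methods are illustrative names, not part of this file:

    import java.util.concurrent.atomic.AtomicReference;

    // A dual-queue node: the same type serves data nodes (isData == true,
    // item initially non-null) and request nodes (isData == false, item
    // initially null). Matching CASes the item field exactly once, so a
    // node's match status never changes once set.
    final class SketchNode {
        final boolean isData;
        final AtomicReference<Object> item;
        final AtomicReference<SketchNode> next = new AtomicReference<SketchNode>();

        SketchNode(Object item, boolean isData) {
            this.item = new AtomicReference<Object>(item);
            this.isData = isData;
        }

        // A data node is matched by CASing its item to null; a request node
        // is matched by CASing its null item to the transferred value.
        boolean tryMatch(Object expect, Object replacement) {
            return item.compareAndSet(expect, replacement);
        }

        // Mirrors the isMatched predicate introduced in this patch.
        boolean isMatched() {
            Object x = item.get();
            return (x == this) || ((x == null) == isData);
        }
    }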
 126     * We introduce here an approach that lies between the extremes of
 127 <   * never versus always updating queue (head and tail) pointers
 128 <   * that reflects the tradeoff of sometimes requiring extra traversal
 129 <   * steps to locate the first and/or last unmatched nodes, versus
 130 <   * the reduced overhead and contention of fewer updates to queue
 131 <   * pointers. For example, a possible snapshot of a queue is:
 127 >   * never versus always updating queue (head and tail) pointers.
 128 >   * This offers a tradeoff between sometimes requiring extra
 129 >   * traversal steps to locate the first and/or last unmatched
 130 >   * nodes, versus the reduced overhead and contention of fewer
 131 >   * updates to queue pointers. For example, a possible snapshot of
 132 >   * a queue is:
 133     *
 134     *  head           tail
 135     *    |              |
 141     * similarly for "tail") is an empirical matter. We have found
 142     * that using very small constants in the range of 1-3 works best
 143     * over a range of platforms. Larger values introduce increasing
 144 <   * costs of cache misses and risks of long traversal chains.
 144 >   * costs of cache misses and risks of long traversal chains, while
 145 >   * smaller values increase CAS contention and overhead.
 146     *
 147     * Dual queues with slack differ from plain M&S dual queues by
 148     * virtue of only sometimes updating head or tail pointers when
 161     * targets. Even when using very small slack values, this
 162     * approach works well for dual queues because it allows all
 163     * operations up to the point of matching or appending an item
 164 <   * (hence potentially releasing another thread) to be read-only,
 165 <   * thus not introducing any further contention. As described
 166 <   * below, we implement this by performing slack maintenance
 167 <   * retries only after these points.
 164 >   * (hence potentially allowing progress by another thread) to be
 165 >   * read-only, thus not introducing any further contention. As
 166 >   * described below, we implement this by performing slack
 167 >   * maintenance retries only after these points.
 168     *
 169     * As an accompaniment to such techniques, traversal overhead can
 170     * be further reduced without increasing contention of head
 171 <   * pointer updates. During traversals, threads may sometimes
 172 <   * shortcut the "next" link path from the current "head" node to
 173 <   * be closer to the currently known first unmatched node. Again,
 174 <   * this may be triggered with using thresholds or randomization.
 171 >   * pointer updates: Threads may sometimes shortcut the "next" link
 172 >   * path from the current "head" node to be closer to the currently
 173 >   * known first unmatched node, and similarly for tail. Again, this
 174 >   * may be triggered using thresholds or randomization.
 175     *
 176     * These ideas must be further extended to avoid unbounded amounts
 177     * of costly-to-reclaim garbage caused by the sequential "next"
 199     * mechanics because an update may leave head at a detached node.
 200     * And while direct writes are possible for tail updates, they
 201     * increase the risk of long retraversals, and hence long garbage
 202 <   * chains which can be much more costly than is worthwhile
 202 >   * chains, which can be much more costly than is worthwhile
 203     * considering that the cost difference of performing a CAS vs
 204     * write is smaller when they are not triggered on each operation
 205     * (especially considering that writes and CASes equally require
 206     * additional GC bookkeeping ("write barriers") that are sometimes
 207     * more costly than the writes themselves because of contention).
 208     *
 206 -   * Removal of internal nodes (due to timed out or interrupted
 207 -   * waits, or calls to remove or Iterator.remove) uses a scheme
 208 -   * roughly similar to that in Scherer, Lea, and Scott
 209 -   * SynchronousQueue. Given a predecessor, we can unsplice any node
 210 -   * except the (actual) tail of the queue. To avoid build-up of
 211 -   * cancelled trailing nodes, upon a request to remove a trailing
 212 -   * node, it is placed in field "cleanMe" to be unspliced later.
 213 -   *
 209     * *** Overview of implementation ***
 210     *
 211 <   * We use a threshold-based approach to updates, with a target
 212 <   * slack of two. The slack value is hard-wired: a path greater
 211 >   * We use a threshold-based approach to updates, with a slack
 212 >   * threshold of two -- that is, we update head/tail when the
 213 >   * current pointer appears to be two or more steps away from the
 214 >   * first/last node. The slack value is hard-wired: a path greater
 215     * than one is naturally implemented by checking equality of
 216     * traversal pointers except when the list has only one element,
 217 <   * in which case we keep max slack at one. Avoiding tracking
 218 <   * explicit counts across situations slightly simplifies an
 217 >   * in which case we keep slack threshold at one. Avoiding tracking
 218 >   * explicit counts across method calls slightly simplifies an
 219     * already-messy implementation. Using randomization would
 220     * probably work better if there were a low-quality dirt-cheap
 221     * per-thread one available, but even ThreadLocalRandom is too
 222     * heavy for these purposes.
 223     *
 224 <   * With such a small slack value, path short-circuiting is rarely
 225 <   * worthwhile. However, it is used (in awaitMatch) immediately
 226 <   * before a waiting thread starts to block, as a final bit of
 227 <   * helping at a point when contention with others is extremely
 228 <   * unlikely (since if other threads that could release it are
 229 <   * operating, then the current thread wouldn't be blocking).
 224 >   * With such a small slack threshold value, it is not worthwhile
 225 >   * to augment this with path short-circuiting (i.e., unsplicing
 226 >   * interior nodes) except in the case of cancellation/removal (see
 227 >   * below).
 228 >   *
 229 >   * We allow both the head and tail fields to be null before any
 230 >   * nodes are enqueued; initializing upon first append. This
 231 >   * simplifies some other logic, as well as providing more
 232 >   * efficient explicit control paths instead of letting JVMs insert
 233 >   * implicit NullPointerExceptions when they are null. While not
 234 >   * currently fully implemented, we also leave open the possibility
 235 >   * of re-nulling these fields when empty (which is complicated to
 236 >   * arrange, for little benefit.)
 237     *
 238     * All enqueue/dequeue operations are handled by the single method
 239     * "xfer" with parameters indicating whether to act as some form
 240     * of offer, put, poll, take, or transfer (each possibly with
 241     * timeout). The relative complexity of using one monolithic
 242     * method outweighs the code bulk and maintenance problems of
 243 <   * using nine separate methods.
 243 >   * using separate methods for each case.
 244     *
 245     * Operation consists of up to three phases. The first is
 246     * implemented within method xfer, the second in tryAppend, and
 253     *    case matching it and returning, also if necessary updating
 254     *    head to one past the matched node (or the node itself if the
 255     *    list has no other unmatched nodes). If the CAS misses, then
 256 <   *    a retry loops until the slack is at most two. Traversals
 257 <   *    also check if the initial head is now off-list, in which
 258 <   *    case they start at the new head.
 256 >   *    a loop retries advancing head by two steps until either
 257 >   *    success or the slack is at most two. By requiring that each
 258 >   *    attempt advances head by two (if applicable), we ensure that
 259 >   *    the slack does not grow without bound. Traversals also check
 260 >   *    if the initial head is now off-list, in which case they
 261 >   *    start at the new head.
 262     *
 263     *    If no candidates are found and the call was untimed
 264     *    poll/offer (argument "how" is NOW), return.
 265     *
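The "advance head by two" rule in phase 1 reduces to a small CAS loop. A hedged sketch, with AtomicReference standing in for the Unsafe casHead used in the real code, and a stripped-down stand-in Node type (all names illustrative):

    import java.util.concurrent.atomic.AtomicReference;

    final class HeadAdvanceSketch {
        static final class Node {
            volatile Node next;
            volatile boolean matched;
        }

        final AtomicReference<Node> head = new AtomicReference<Node>();

        // After matching node p, reached from snapshot h of head, try to
        // swing head two nodes forward; retry only while the observed
        // slack is still >= 2, so the loop cannot run unboundedly.
        void advanceHead(Node h, Node p) {
            for (Node q = p; q != h;) {
                Node n = q.next;                 // target two past h when possible
                if (head.get() == h && head.compareAndSet(h, (n == null) ? q : n))
                    break;                       // updated; slack is now < 2
                if ((h = head.get()) == null ||
                    (q = h.next) == null || !q.matched)
                    break;                       // another thread fixed it
            }
        }
    }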
 266     * 2. Try to append a new node (method tryAppend)
 267     *
 268 <   *    Starting at current tail pointer, try to append a new node
 269 <   *    to the list (or if head was null, establish the first
 270 <   *    node). Nodes can be appended only if their predecessors are
 271 <   *    either already matched or are of the same mode. If we detect
 272 <   *    otherwise, then a new node with opposite mode must have been
 273 <   *    appended during traversal, so must restart at phase 1. The
 274 <   *    traversal and update steps are otherwise similar to phase 1:
 275 <   *    Retrying upon CAS misses and checking for staleness. In
 276 <   *    particular, if a self-link is encountered, then we can
 277 <   *    safely jump to a node on the list by continuing the
 278 <   *    traversal at current head.
 268 >   *    Starting at current tail pointer, find the actual last node
 269 >   *    and try to append a new node (or if head was null, establish
 270 >   *    the first node). Nodes can be appended only if their
 271 >   *    predecessors are either already matched or are of the same
 272 >   *    mode. If we detect otherwise, then a new node with opposite
 273 >   *    mode must have been appended during traversal, so we must
 274 >   *    restart at phase 1. The traversal and update steps are
 275 >   *    otherwise similar to phase 1: Retrying upon CAS misses and
 276 >   *    checking for staleness. In particular, if a self-link is
 277 >   *    encountered, then we can safely jump to a node on the list
 278 >   *    by continuing the traversal at current head.
 279     *
 280     *    On successful append, if the call was ASYNC, return.
 281     *
 282     * 3. Await match or cancellation (method awaitMatch)
 283     *
 284     *    Wait for another thread to match node; instead cancelling if
 285 <   *    current thread was interrupted or the wait timed out. On
 285 >   *    the current thread was interrupted or the wait timed out. On
 286     *    multiprocessors, we use front-of-queue spinning: If a node
 287     *    appears to be the first unmatched node in the queue, it
 288     *    spins a bit before blocking. In either case, before blocking
 297     *    to decide to occasionally perform a Thread.yield. While
 298     *    yield has underdefined specs, we assume that it might help,
 299     *    and will not hurt in limiting impact of spinning on busy
 300 <   *    systems. We also use much smaller (1/4) spins for nodes
 301 <   *    that are not known to be front but whose predecessors have
 302 <   *    not blocked -- these "chained" spins avoid artifacts of
 300 >   *    systems. We also use smaller (1/2) spins for nodes that are
 301 >   *    not known to be front but whose predecessors have not
 302 >   *    blocked -- these "chained" spins avoid artifacts of
 303     *    front-of-queue rules which otherwise lead to alternating
 304     *    nodes spinning vs blocking. Further, front threads that
 305     *    represent phase changes (from data to request node or vice
 306     *    versa) compared to their predecessors receive additional
 307 <   *    spins, reflecting the longer code path lengths necessary to
 308 <   *    release them under contention.
 307 >   *    chained spins, reflecting longer paths typically required to
 308 >   *    unblock threads during phase changes.
 309 >   *
 310 >   *
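The timed branch of this waiting uses the standard parkNanos re-arm pattern. A self-contained sketch under illustrative names (the real awaitMatch interleaves this with the spinning and cancellation logic described above):

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.locks.LockSupport;

    final class TimedWaitSketch {
        // Waits up to the given timeout for `done` to become true,
        // re-parking with the remaining time after spurious wakeups.
        static boolean awaitDone(AtomicBoolean done, long timeout, TimeUnit unit) {
            long nanos = unit.toNanos(timeout);
            long lastTime = System.nanoTime();
            while (!done.get()) {
                if (nanos <= 0)
                    return false;               // timed out
                LockSupport.parkNanos(nanos);   // may return early or spuriously
                long now = System.nanoTime();
                nanos -= now - lastTime;        // charge elapsed time to budget
                lastTime = now;
            }
            return true;
        }
    }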
 311 >   * ** Unlinking removed interior nodes **
 312 >   *
 313 >   * In addition to minimizing garbage retention via self-linking
 314 >   * described above, we also unlink removed interior nodes. These
 315 >   * may arise due to timed out or interrupted waits, or calls to
 316 >   * remove(x) or Iterator.remove. Normally, given a node that was
 317 >   * at one time known to be the predecessor of some node s that is
 318 >   * to be removed, we can unsplice s by CASing the next field of
 319 >   * its predecessor if it still points to s (otherwise s must
 320 >   * already have been removed or is now offlist). But there are two
 321 >   * situations in which we cannot guarantee to make node s
 322 >   * unreachable in this way: (1) If s is the trailing node of list
 323 >   * (i.e., with null next), then it is pinned as the target node
 324 >   * for appends, so can only be removed later after other nodes are
 325 >   * appended. (2) We cannot necessarily unlink s given a
 326 >   * predecessor node that is matched (including the case of being
 327 >   * cancelled): the predecessor may already be unspliced, in which
 328 >   * case some previous reachable node may still point to s.
 329 >   * (For further explanation see Herlihy & Shavit "The Art of
 330 >   * Multiprocessor Programming" chapter 9). Although, in both
 331 >   * cases, we can rule out the need for further action if either s
 332 >   * or its predecessor are (or can be made to be) at, or fall off
 333 >   * from, the head of list.
 334 >   *
 335 >   * Without taking these into account, it would be possible for an
 336 >   * unbounded number of supposedly removed nodes to remain
 337 >   * reachable. Situations leading to such buildup are uncommon but
 338 >   * can occur in practice; for example when a series of short timed
 339 >   * calls to poll repeatedly time out but never otherwise fall off
 340 >   * the list because of an untimed call to take at the front of the
 341 >   * queue.
 342 >   *
 343 >   * When these cases arise, rather than always retraversing the
 344 >   * entire list to find an actual predecessor to unlink (which
 345 >   * won't help for case (1) anyway), we record a conservative
 346 >   * estimate of possible unsplice failures (in "sweepVotes").
 347 >   * We trigger a full sweep when the estimate exceeds a threshold
 348 >   * ("SWEEP_THRESHOLD") indicating the maximum number of estimated
 349 >   * removal failures to tolerate before sweeping through, unlinking
 350 >   * cancelled nodes that were not unlinked upon initial removal.
 351 >   * We perform sweeps by the thread hitting threshold (rather than
 352 >   * background threads or by spreading work to other threads)
 353 >   * because in the main contexts in which removal occurs, the
 354 >   * caller is already timed-out, cancelled, or performing a
 355 >   * potentially O(n) operation (e.g. remove(x)), none of which are
 356 >   * time-critical enough to warrant the overhead that alternatives
 357 >   * would impose on other threads.
 358 >   *
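The vote-and-sweep bookkeeping amounts to a small CAS loop. A sketch with AtomicInteger standing in for the Unsafe-based casSweepVotes added by this patch (names illustrative):

    import java.util.concurrent.atomic.AtomicInteger;

    final class SweepVotesSketch {
        static final int SWEEP_THRESHOLD = 32;
        final AtomicInteger sweepVotes = new AtomicInteger();

        // Called after a failed unsplice: record one vote, and once enough
        // votes have accumulated, reset the count and sweep.
        void voteOrSweep(Runnable sweep) {
            for (;;) {
                int v = sweepVotes.get();
                if (v < SWEEP_THRESHOLD) {
                    if (sweepVotes.compareAndSet(v, v + 1))
                        break;                   // vote recorded
                }
                else if (sweepVotes.compareAndSet(v, 0)) {
                    sweep.run();                 // this thread pays for the sweep
                    break;
                }
            }
        }
    }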
 359 >   * Because the sweepVotes estimate is conservative, and because
 360 >   * nodes become unlinked "naturally" as they fall off the head of
 361 >   * the queue, and because we allow votes to accumulate even while
 362 >   * sweeps are in progress, there are typically significantly fewer
 363 >   * such nodes than estimated. Choice of a threshold value
 364 >   * balances the likelihood of wasted effort and contention, versus
 365 >   * providing a worst-case bound on retention of interior nodes in
 366 >   * quiescent queues. The value defined below was chosen
 367 >   * empirically to balance these under various timeout scenarios.
 368 >   *
 369 >   * Note that we cannot self-link unlinked interior nodes during
 370 >   * sweeps. However, the associated garbage chains terminate when
 371 >   * some successor ultimately falls off the head of the list and is
 372 >   * self-linked.
 373     */
 374
 375        /** True if on multiprocessor */
 377            Runtime.getRuntime().availableProcessors() > 1;
 378
 379        /**
 380 <       * The number of times to spin (with on average one randomly
 381 <       * interspersed call to Thread.yield) on multiprocessor before
 382 <       * blocking when a node is apparently the first waiter in the
 383 <       * queue. See above for explanation. Must be a power of two. The
 384 <       * value is empirically derived -- it works pretty well across a
 385 <       * variety of processors, numbers of CPUs, and OSes.
 380 >       * The number of times to spin (with randomly interspersed calls
 381 >       * to Thread.yield) on multiprocessor before blocking when a node
 382 >       * is apparently the first waiter in the queue. See above for
 383 >       * explanation. Must be a power of two. The value is empirically
 384 >       * derived -- it works pretty well across a variety of processors,
 385 >       * numbers of CPUs, and OSes.
 386         */
 387        private static final int FRONT_SPINS   = 1 << 7;
 388
 389        /**
 390         * The number of times to spin before blocking when a node is
 391 <       * preceded by another node that is apparently spinning.
 391 >       * preceded by another node that is apparently spinning. Also
 392 >       * serves as an increment to FRONT_SPINS on phase changes, and as
 393 >       * base average frequency for yielding during spins. Must be a
 394 >       * power of two.
 395         */
 396 <      private static final int CHAINED_SPINS = FRONT_SPINS >>> 2;
 396 >      private static final int CHAINED_SPINS = FRONT_SPINS >>> 1;
 397 >
 398 >      /**
 399 >       * The maximum number of estimated removal failures (sweepVotes)
 400 >       * to tolerate before sweeping through the queue unlinking
 401 >       * cancelled nodes that were not unlinked upon initial
 402 >       * removal. See above for explanation. The value must be at least
 403 >       * two to avoid useless sweeps when removing trailing nodes.
 404 >       */
 405 >      static final int SWEEP_THRESHOLD = 32;
 406
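For reference, the spin constants above combine with occasional yielding roughly as in this sketch (illustrative only; the real loop lives in awaitMatch below and also rechecks the item and cancellation status on every iteration, and ThreadLocalRandom here may be the jsr166y version or, in JDK 7+, java.util.concurrent.ThreadLocalRandom):

    import java.util.concurrent.ThreadLocalRandom;

    final class SpinSketch {
        static final int FRONT_SPINS   = 1 << 7;
        static final int CHAINED_SPINS = FRONT_SPINS >>> 1;

        // Spin a bounded number of times, yielding on average once per
        // CHAINED_SPINS iterations so spinners do not monopolize busy systems.
        static void spin() {
            ThreadLocalRandom random = ThreadLocalRandom.current();
            for (int spins = FRONT_SPINS; spins > 0; --spins) {
                if (random.nextInt(CHAINED_SPINS) == 0)
                    Thread.yield();              // occasionally yield
            }
        }
    }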
 407        /**
 408         * Queue nodes. Uses Object, not E, for items to allow forgetting
 409         * them after use. Relies heavily on Unsafe mechanics to minimize
 410 <       * unnecessary ordering constraints: Writes that intrinsically
 411 <       * precede or follow CASes use simple relaxed forms. Other
 329 <       * cleanups use releasing/lazy writes.
 410 >       * unnecessary ordering constraints: Writes that are intrinsically
 411 >       * ordered wrt other accesses or CASes use simple relaxed forms.
 412         */
 413        static final class Node {
 414            final boolean isData;   // false if this is a request node
 422            }
 423
 424            final boolean casItem(Object cmp, Object val) {
 425 +              // assert cmp == null || cmp.getClass() != Node.class;
 426                return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
 427            }
 428
 429            /**
 430 <           * Creates a new node. Uses relaxed write because item can only
 431 <           * be seen if followed by CAS.
 430 >           * Constructs a new node. Uses relaxed write because item can
 431 >           * only be seen after publication via casNext.
 432             */
 433            Node(Object item, boolean isData) {
 434                UNSAFE.putObject(this, itemOffset, item); // relaxed write
 444            }
 445
 446            /**
 447 <           * Sets item to self (using a releasing/lazy write) and waiter
 448 <           * to null, to avoid garbage retention after extracting or
 449 <           * cancelling.
 447 >           * Sets item to self and waiter to null, to avoid garbage
 448 >           * retention after matching or cancelling. Uses relaxed writes
 449 >           * because order is already constrained in the only calling
 450 >           * contexts: item is forgotten only after volatile/atomic
 451 >           * mechanics that extract items. Similarly, clearing waiter
 452 >           * follows either CAS or return from park (if ever parked;
 453 >           * else we don't care).
 454             */
 455            final void forgetContents() {
 456 <              UNSAFE.putOrderedObject(this, itemOffset, this);
 457 <              UNSAFE.putOrderedObject(this, waiterOffset, null);
 456 >              UNSAFE.putObject(this, itemOffset, this);
 457 >              UNSAFE.putObject(this, waiterOffset, null);
 458            }
 459
 460            /**
 463             */
 464            final boolean isMatched() {
 465                Object x = item;
 466 <              return x == this || (x != null) != isData;
 466 >              return (x == this) || ((x == null) == isData);
 467 >          }
 468 >
 469 >          /**
 470 >           * Returns true if this is an unmatched request node.
 471 >           */
 472 >          final boolean isUnmatchedRequest() {
 473 >              return !isData && item == null;
 474            }
 475
 476            /**
 488             * Tries to artificially match a data node -- used by remove.
 489             */
 490            final boolean tryMatchData() {
 491 +              // assert isData;
 492                Object x = item;
 493                if (x != null && x != this && casItem(x, null)) {
 494                    LockSupport.unpark(waiter);
 510        }
 511
 512        /** head of the queue; null until first enqueue */
 513 <      private transient volatile Node head;
 419 <
 420 <      /** predecessor of dangling unspliceable node */
 421 <      private transient volatile Node cleanMe; // decl here to reduce contention
 513 >      transient volatile Node head;
 514
 515        /** tail of the queue; null until first append */
 516        private transient volatile Node tail;
 517
 518 +      /** The number of apparent failures to unsplice removed nodes */
 519 +      private transient volatile int sweepVotes;
 520 +
 521        // CAS methods for fields
 522        private boolean casTail(Node cmp, Node val) {
 523            return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val);
 527            return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val);
 528        }
 529
 530 <      private boolean casCleanMe(Node cmp, Node val) {
 531 <          return UNSAFE.compareAndSwapObject(this, cleanMeOffset, cmp, val);
 530 >      private boolean casSweepVotes(int cmp, int val) {
 531 >          return UNSAFE.compareAndSwapInt(this, sweepVotesOffset, cmp, val);
 532        }
 533
 534        /*
 535 <       * Possible values for "how" argument in xfer method. Beware that
 441 <       * the order of assigned numerical values matters.
 535 >       * Possible values for "how" argument in xfer method.
 536         */
 537 <      private static final int NOW     = 0; // for untimed poll, tryTransfer
 538 <      private static final int ASYNC   = 1; // for offer, put, add
 539 <      private static final int SYNC    = 2; // for transfer, take
 540 <      private static final int TIMEOUT = 3; // for timed poll, tryTransfer
 537 >      private static final int NOW   = 0; // for untimed poll, tryTransfer
 538 >      private static final int ASYNC = 1; // for offer, put, add
 539 >      private static final int SYNC  = 2; // for transfer, take
 540 >      private static final int TIMED = 3; // for timed poll, tryTransfer
 541 >
 542 >      @SuppressWarnings("unchecked")
 543 >      static <E> E cast(Object item) {
 544 >          // assert item == null || item.getClass() != Node.class;
 545 >          return (E) item;
 546 >      }
 547
 548        /**
 549         * Implements all queuing methods. See above for explanation.
 550         *
 551         * @param e the item or null for take
 552         * @param haveData true if this is a put, else a take
 553 <       * @param how NOW, ASYNC, SYNC, or TIMEOUT
 554 <       * @param nanos timeout in nanosecs, used only if mode is TIMEOUT
 553 >       * @param how NOW, ASYNC, SYNC, or TIMED
 554 >       * @param nanos timeout in nanosecs, used only if mode is TIMED
 555         * @return an item if matched, else e
 556         * @throws NullPointerException if haveData mode but e is null
 557         */
 558 <      private Object xfer(Object e, boolean haveData, int how, long nanos) {
 558 >      private E xfer(E e, boolean haveData, int how, long nanos) {
 559            if (haveData && (e == null))
 560                throw new NullPointerException();
 561            Node s = null;                        // the node to append, if needed
 562
 563 <          retry: for (;;) {                     // restart on append race
 563 >          retry:
 564 >          for (;;) {                            // restart on append race
 565
 566                for (Node h = head, p = h; p != null;) { // find & match first node
 567                    boolean isData = p.isData;
 570                        if (isData == haveData)   // can't match
 571                            break;
 572                        if (p.casItem(item, e)) { // match
 573 <                          Thread w = p.waiter;
 574 <                          while (p != h) {      // update head
 575 <                              Node n = p.next;  // by 2 unless singleton
 475 <                              if (n != null)
 476 <                                  p = n;
 477 <                              if (head == h && casHead(h, p)) {
 573 >                          for (Node q = p; q != h;) {
 574 >                              Node n = q.next;  // update by 2 unless singleton
 575 >                              if (head == h && casHead(h, n == null ? q : n)) {
 576                                    h.forgetNext();
 577                                    break;
 578                                }                 // advance and retry
 579                                if ((h = head) == null ||
 580 <                                  (p = h.next) == null || !p.isMatched())
 580 >                                  (q = h.next) == null || !q.isMatched())
 581                                    break;        // unless slack < 2
 582                            }
 583 <                          LockSupport.unpark(w);
 584 <                          return item;
 583 >                          LockSupport.unpark(p.waiter);
 584 >                          return this.<E>cast(item);
 585                        }
 586                    }
 587                    Node n = p.next;
 588 <                  p = p != n ? n : (h = head); // Use head if p offlist
 588 >                  p = (p != n) ? n : (h = head); // Use head if p offlist
 589                }
 590
 591 <              if (how >= ASYNC) {               // No matches available
 591 >              if (how != NOW) {                 // No matches available
 592                    if (s == null)
 593                        s = new Node(e, haveData);
 594                    Node pred = tryAppend(s, haveData);
 595                    if (pred == null)
 596                        continue retry;           // lost race vs opposite mode
 597 <                  if (how >= SYNC)
 598 <                      return awaitMatch(pred, s, e, how, nanos);
 597 >                  if (how != ASYNC)
 598 >                      return awaitMatch(s, pred, e, (how == TIMED), nanos);
 599                }
 600                return e; // not waiting
 601            }
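As a quick reference, the public methods dispatch onto xfer as follows (assembled from the calls shown elsewhere in this patch and the surrounding class):

    // put(e), offer(e), add(e)      -> xfer(e,    true,  ASYNC, 0)
    // transfer(e)                   -> xfer(e,    true,  SYNC,  0)
    // tryTransfer(e)                -> xfer(e,    true,  NOW,   0)
    // tryTransfer(e, timeout, unit) -> xfer(e,    true,  TIMED, unit.toNanos(timeout))
    // take()                        -> xfer(null, false, SYNC,  0)
    // poll()                        -> xfer(null, false, NOW,   0)
    // poll(timeout, unit)           -> xfer(null, false, TIMED, unit.toNanos(timeout))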
 604        /**
 605         * Tries to append node s as tail.
 606         *
 509 -       * @param haveData true if appending in data mode
 607         * @param s the node to append
 608 +       * @param haveData true if appending in data mode
 609         * @return null on failure due to losing race with append in
 610         * different mode, else s's predecessor, or s itself if no
 611         * predecessor
 612         */
 613        private Node tryAppend(Node s, boolean haveData) {
 614 <          for (Node t = tail, p = t;;) { // move p to actual tail and append
 614 >          for (Node t = tail, p = t;;) {        // move p to last node and append
 615                Node n, u;                        // temps for reads of next & tail
 616                if (p == null && (p = head) == null) {
 617                    if (casHead(null, s))
 619                }
 620                else if (p.cannotPrecede(haveData))
 621                    return null;                  // lost race vs opposite mode
 622 <              else if ((n = p.next) != null)    // Not tail; keep traversing
 622 >              else if ((n = p.next) != null)    // not last; keep traversing
 623                    p = p != t && t != (u = tail) ? (t = u) : // stale tail
 624 <                      p != n ? n : null;        // restart if off list
 624 >                      (p != n) ? n : null;      // restart if off list
 625                else if (!p.casNext(null, s))
 626                    p = p.next;                   // re-read on CAS failure
 627                else {
 628 <                  if (p != t) {                 // Update if slack now >= 2
 628 >                  if (p != t) {                 // update if slack now >= 2
 629                        while ((tail != t || !casTail(t, s)) &&
 630                               (t = tail)   != null &&
 631                               (s = t.next) != null && // advance and retry
 639        /**
 640         * Spins/yields/blocks until node s is matched or caller gives up.
 641         *
 544 -       * @param pred the predecessor of s or s or null if none
 642         * @param s the waiting node
 643 +       * @param pred the predecessor of s, or s itself if it has no
 644 +       * predecessor, or null if unknown (the null case does not occur
 645 +       * in any current calls but may in possible future extensions)
 646         * @param e the comparison value for checking match
 647 <       * @param how either SYNC or TIMEOUT
 648 <       * @param nanos timeout value
 647 >       * @param timed if true, wait only until timeout elapses
 648 >       * @param nanos timeout in nanosecs, used only if timed is true
 649         * @return matched item, or e if unmatched on interrupt or timeout
 650         */
 651 <      private Object awaitMatch(Node pred, Node s, Object e,
 652 <                                int how, long nanos) {
 553 <          long lastTime = (how == TIMEOUT) ? System.nanoTime() : 0L;
 651 >      private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) {
 652 >          long lastTime = timed ? System.nanoTime() : 0L;
 653            Thread w = Thread.currentThread();
 654            int spins = -1; // initialized after first item and cancel checks
 655            ThreadLocalRandom randomYields = null; // bound if needed
 657            for (;;) {
 658                Object item = s.item;
 659                if (item != e) {                  // matched
 660 +                  // assert item != s;
 661                    s.forgetContents();           // avoid garbage
 662 <                  return item;
 662 >                  return this.<E>cast(item);
 663                }
 664 <              if ((w.isInterrupted() || (how == TIMEOUT && nanos <= 0)) &&
 665 <                  s.casItem(e, s)) {       // cancel
 664 >              if ((w.isInterrupted() || (timed && nanos <= 0)) &&
 665 >                      s.casItem(e, s)) {        // cancel
 666                    unsplice(pred, s);
 667                    return e;
 668                }
 671                    if ((spins = spinsFor(pred, s.isData)) > 0)
 672                        randomYields = ThreadLocalRandom.current();
 673                }
 674 <              else if (spins > 0) {             // spin, occasionally yield
 575 <                  if (randomYields.nextInt(FRONT_SPINS) == 0)
 576 <                      Thread.yield();
 674 >              else if (spins > 0) {             // spin
 675                    --spins;
 676 +                  if (randomYields.nextInt(CHAINED_SPINS) == 0)
 677 +                      Thread.yield();           // occasionally yield
 678                }
 679                else if (s.waiter == null) {
 680 <                  shortenHeadPath();            // reduce slack before blocking
 581 <                  s.waiter = w;                 // request unpark
 680 >                  s.waiter = w;                 // request unpark then recheck
 681                }
 682 <              else if (how == TIMEOUT) {
 682 >              else if (timed) {
 683                    long now = System.nanoTime();
 684                    if ((nanos -= now - lastTime) > 0)
 685                        LockSupport.parkNanos(this, nanos);
 687                }
 688                else {
 689                    LockSupport.park(this);
 591 -                  spins = -1;                   // spin if front upon wakeup
 690                }
 691            }
 692        }
 697         */
 698        private static int spinsFor(Node pred, boolean haveData) {
 699            if (MP && pred != null) {
 700 <              boolean predData = pred.isData;
 701 <              if (predData != haveData)         // front and phase change
 702 <                  return FRONT_SPINS + (FRONT_SPINS >>> 1);
 605 <              if (predData != (pred.item != null)) // probably at front
 700 >              if (pred.isData != haveData)      // phase change
 701 >                  return FRONT_SPINS + CHAINED_SPINS;
 702 >              if (pred.isMatched())             // probably at front
 703                    return FRONT_SPINS;
 704                if (pred.waiter == null)          // pred apparently spinning
 705                    return CHAINED_SPINS;
 707            return 0;
 708        }
 709
 710 +      /* -------------- Traversal methods -------------- */
 711 +
 712        /**
 713 <       * Tries (once) to unsplice nodes between head and first unmatched
 714 <       * or trailing node; failing on contention.
 715 <       */
 716 <      private void shortenHeadPath() {
 717 <          Node h, hn, p, q;
 718 <          if ((p = h = head) != null && h.isMatched() &&
 719 <              (q = hn = h.next) != null) {
 621 <              Node n;
 622 <              while ((n = q.next) != q) {
 623 <                  if (n == null || !q.isMatched()) {
 624 <                      if (hn != q && h.next == hn)
 625 <                          h.casNext(hn, q);
 626 <                      break;
 627 <                  }
 628 <                  p = q;
 629 <                  q = n;
 630 <              }
 631 <          }
 713 >       * Returns the successor of p, or the head node if p.next has been
 714 >       * linked to self, which will only be true if traversing with a
 715 >       * stale pointer that is now off the list.
 716 >       */
 717 >      final Node succ(Node p) {
 718 >          Node next = p.next;
 719 >          return (p == next) ? head : next;
 720        }
 721
 634 -      /* -------------- Traversal methods -------------- */
 635 -
 722        /**
 723         * Returns the first unmatched node of the given mode, or null if
 724         * none. Used by methods isEmpty, hasWaitingConsumer.
 725         */
 726 <      private Node firstOfMode(boolean data) {
 727 <          for (Node p = head; p != null; ) {
 726 >      private Node firstOfMode(boolean isData) {
 727 >          for (Node p = head; p != null; p = succ(p)) {
 728                if (!p.isMatched())
 729 <                  return p.isData == data? p : null;
 644 <              Node n = p.next;
 645 <              p = n != p ? n : head;
 729 >                  return (p.isData == isData) ? p : null;
 730            }
 731            return null;
 732        }
 733
 734        /**
 735         * Returns the item in the first unmatched node with isData; or
 736 <       * null if none. Used by peek.
 736 >       * null if none.  Used by peek.
 737         */
 738 <      private Object firstDataItem() {
 739 <          for (Node p = head; p != null; ) {
 656 <              boolean isData = p.isData;
 738 >      private E firstDataItem() {
 739 >          for (Node p = head; p != null; p = succ(p)) {
 740                Object item = p.item;
 741 <              if (item != p && (item != null) == isData)
 742 <                  return isData ? item : null;
 743 <              Node n = p.next;
 744 <              p = n != p ? n : head;
 741 >              if (p.isData) {
 742 >                  if (item != null && item != p)
 743 >                      return this.<E>cast(item);
 744 >              }
 745 >              else if (item == null)
 746 >                  return null;
 747            }
 748            return null;
 749        }
 774
 775        final class Itr implements Iterator<E> {
 776            private Node nextNode;   // next node to return item for
 777 <          private Object nextItem; // the corresponding item
 777 >          private E nextItem;      // the corresponding item
 778            private Node lastRet;    // last returned node, to support remove
 779 +          private Node lastPred;   // predecessor to unlink lastRet
 780
 781            /**
 782             * Moves to next node after prev, or first node if prev null.
 783             */
 784            private void advance(Node prev) {
 785 <              lastRet = prev;
 786 <              Node p;
 787 <              if (prev == null || (p = prev.next) == prev)
 788 <                  p = head;
 789 <              while (p != null) {
 790 <                  Object item = p.item;
 791 <                  if (p.isData) {
 792 <                      if (item != null && item != p) {
 793 <                          nextItem = item;
 794 <                          nextNode = p;
 785 >              /*
 786 >               * To track and avoid buildup of deleted nodes in the face
 787 >               * of calls to both Queue.remove and Itr.remove, we must
 788 >               * include variants of unsplice and sweep upon each
 789 >               * advance: Upon Itr.remove, we may need to catch up links
 790 >               * from lastPred, and upon other removes, we might need to
 791 >               * skip ahead from stale nodes and unsplice deleted ones
 792 >               * found while advancing.
 793 >               */
 794 >
 795 >              Node r, b; // reset lastPred upon possible deletion of lastRet
 796 >              if ((r = lastRet) != null && !r.isMatched())
 797 >                  lastPred = r;    // next lastPred is old lastRet
 798 >              else if ((b = lastPred) == null || b.isMatched())
 799 >                  lastPred = null; // at start of list
 800 >              else {
 801 >                  Node s, n;       // help with removal of lastPred.next
 802 >                  while ((s = b.next) != null &&
 803 >                         s != b && s.isMatched() &&
 804 >                         (n = s.next) != null && n != s)
 805 >                      b.casNext(s, n);
 806 >              }
 807 >
 808 >              this.lastRet = prev;
 809 >              for (Node p = prev, s, n;;) {
 810 >                  s = (p == null) ? head : p.next;
 811 >                  if (s == null)
 812 >                      break;
 813 >                  else if (s == p) {
 814 >                      p = null;
 815 >                      continue;
 816 >                  }
 817 >                  Object item = s.item;
 818 >                  if (s.isData) {
 819 >                      if (item != null && item != s) {
 820 >                          nextItem = LinkedTransferQueue.<E>cast(item);
 821 >                          nextNode = s;
 822                            return;
 823                        }
 824                    }
 825                    else if (item == null)
 826                        break;
 827 <                  Node n = p.next;
 828 <                  p = n != p ? n : head;
 827 >                  // assert s.isMatched();
 828 >                  if (p == null)
 829 >                      p = s;
 830 >                  else if ((n = s.next) == null)
 831 >                      break;
 832 >                  else if (s == n)
 833 >                      p = null;
 834 >                  else
 835 >                      p.casNext(s, n);
 836                }
 837                nextNode = null;
 838 +              nextItem = null;
 839            }
 840
 841            Itr() {
 849            public final E next() {
 850                Node p = nextNode;
 851                if (p == null) throw new NoSuchElementException();
 852 <              Object e = nextItem;
 852 >              E e = nextItem;
 853                advance(p);
 854 <              return (E) e;
 854 >              return e;
 855            }
 856
 857            public final void remove() {
 858 <              Node p = lastRet;
 859 <              if (p == null) throw new IllegalStateException();
 860 <              lastRet = null;
 861 <              findAndRemoveNode(p);
 858 >              final Node lastRet = this.lastRet;
 859 >              if (lastRet == null)
 860 >                  throw new IllegalStateException();
 861 >              this.lastRet = null;
 862 >              if (lastRet.tryMatchData())
 863 >                  unsplice(lastPred, lastRet);
 864            }
 865        }
 866
 870         * Unsplices (now or later) the given deleted/cancelled node with
 871         * the given predecessor.
 872         *
 873 <       * @param pred predecessor of node to be unspliced
 873 >       * @param pred a node that was at one time known to be the
 874 >       * predecessor of s, or null or s itself if s is/was at head
 875         * @param s the node to be unspliced
 876         */
 877 <      private void unsplice(Node pred, Node s) {
 878 <          s.forgetContents(); // clear unneeded fields
 877 >      final void unsplice(Node pred, Node s) {
 878 >          s.forgetContents(); // forget unneeded fields
 879            /*
 880 <           * At any given time, exactly one node on list cannot be
 881 <           * deleted -- the last inserted node. To accommodate this, if
 882 <           * we cannot delete s, we save its predecessor as "cleanMe",
 883 <           * processing the previously saved version first. Because only
 884 <           * one node in the list can have a null next, at least one of
 761 <           * node s or the node previously saved can always be
 762 <           * processed, so this always terminates.
 880 >           * See above for rationale. Briefly: if pred still points to
 881 >           * s, try to unlink s. If s cannot be unlinked, because it is
 882 >           * trailing node or pred might be unlinked, and neither pred
 883 >           * nor s are head or offlist, add to sweepVotes, and if enough
 884 >           * votes have accumulated, sweep.
 885             */
 886 <          if (pred != null && pred != s) {
 887 <              while (pred.next == s) {
 888 <                  Node oldpred = cleanMe == null? null : reclean();
 889 <                  Node n = s.next;
 890 <                  if (n != null) {
 891 <                      if (n != s)
 892 <                          pred.casNext(s, n);
 893 <                      break;
 886 >          if (pred != null && pred != s && pred.next == s) {
 887 >              Node n = s.next;
 888 >              if (n == null ||
 889 >                  (n != s && pred.casNext(s, n) && pred.isMatched())) {
 890 >                  for (;;) {               // check if at, or could be, head
 891 >                      Node h = head;
 892 >                      if (h == pred || h == s || h == null)
 893 >                          return;          // at head or list empty
 894 >                      if (!h.isMatched())
 895 >                          break;
 896 >                      Node hn = h.next;
 897 >                      if (hn == null)
 898 >                          return;          // now empty
 899 >                      if (hn != h && casHead(h, hn))
 900 >                          h.forgetNext();  // advance head
 901 >                  }
 902 >                  if (pred.next != pred && s.next != s) { // recheck if offlist
 903 >                      for (;;) {           // sweep now if enough votes
 904 >                          int v = sweepVotes;
 905 >                          if (v < SWEEP_THRESHOLD) {
 906 >                              if (casSweepVotes(v, v + 1))
 907 >                                  break;
 908 >                          }
 909 >                          else if (casSweepVotes(v, 0)) {
 910 >                              sweep();
 911 >                              break;
 912 >                          }
 913 >                      }
 914                    }
 773 -                  if (oldpred == pred ||      // Already saved
 774 -                      (oldpred == null && casCleanMe(null, pred)))
 775 -                      break;                  // Postpone cleaning
 915                }
 916            }
 917        }
 918
 919        /**
 920 <       * Tries to unsplice the deleted/cancelled node held in cleanMe
 921 <       * that was previously uncleanable because it was at tail.
 783 <       *
 784 <       * @return current cleanMe node (or null)
 920 >       * Unlinks matched (typically cancelled) nodes encountered in a
 921 >       * traversal from head.
 922         */
 923 <      private Node reclean() {
 924 <          /*
 925 <           * cleanMe is, or at one time was, predecessor of a cancelled
 926 <           * node s that was the tail so could not be unspliced. If it
 927 <           * is no longer the tail, try to unsplice if necessary and
 928 <           * make cleanMe slot available. This differs from similar
 792 <           * code in unsplice() because we must check that pred still
 793 <           * points to a matched node that can be unspliced -- if not,
 794 <           * we can (must) clear cleanMe without unsplicing. This can
 795 <           * loop only due to contention.
 796 <           */
 797 <          Node pred;
 798 <          while ((pred = cleanMe) != null) {
 799 <              Node s = pred.next;
 800 <              Node n;
 801 <              if (s == null || s == pred || !s.isMatched())
 802 <                  casCleanMe(pred, null); // already gone
 803 <              else if ((n = s.next) != null) {
 804 <                  if (n != s)
 805 <                      pred.casNext(s, n);
 806 <                  casCleanMe(pred, null);
 807 <              }
 808 <              else
 923 >      private void sweep() {
 924 >          for (Node p = head, s, n; p != null && (s = p.next) != null; ) {
 925 >              if (!s.isMatched())
 926 >                  // Unmatched nodes are never self-linked
 927 >                  p = s;
 928 >              else if ((n = s.next) == null) // trailing node is pinned
 929                    break;
 930 <          }
 931 <          return pred;
 932 <      }
 933 <
 934 <      /**
 815 <       * Main implementation of Iterator.remove(). Find
 816 <       * and unsplice the given node.
 817 <       */
 818 <      final void findAndRemoveNode(Node s) {
 819 <          if (s.tryMatchData()) {
 820 <              Node pred = null;
 821 <              Node p = head;
 822 <              while (p != null) {
 823 <                  if (p == s) {
 824 <                      unsplice(pred, p);
 825 <                      break;
 826 <                  }
 827 <                  if (!p.isData && !p.isMatched())
 828 <                      break;
 829 <                  pred = p;
 830 <                  if ((p = p.next) == pred) { // stale
 831 <                      pred = null;
 832 <                      p = head;
 833 <                  }
 834 <              }
 930 >              else if (s == n)               // stale
 931 >                  // No need to also check for p == s, since that implies s == n
 932 >                  p = head;
 933 >              else
 934 >                  p.casNext(s, n);
 935            }
 936        }
 937
 940         */
 941        private boolean findAndRemove(Object e) {
 942            if (e != null) {
 943 <              Node pred = null;
 844 <              Node p = head;
 845 <              while (p != null) {
 943 >              for (Node pred = null, p = head; p != null; ) {
 944                    Object item = p.item;
 945                    if (p.isData) {
 946                        if (item != null && item != p && e.equals(item) &&
 952                    else if (item == null)
 953                        break;
 954                    pred = p;
 955 <                  if ((p = p.next) == pred) {
 955 >                  if ((p = p.next) == pred) { // stale
 956                        pred = null;
 957                        p = head;
 958                    }
1010         * Inserts the specified element at the tail of this queue.
1011         * As the queue is unbounded, this method will never return {@code false}.
1012         *
1013 <       * @return {@code true} (as specified by
 916 <       *  {@link BlockingQueue#offer(Object) BlockingQueue.offer})
1013 >       * @return {@code true} (as specified by {@link Queue#offer})
1014         * @throws NullPointerException if the specified element is null
1015         */
1016        public boolean offer(E e) {
1079         */
1080        public boolean tryTransfer(E e, long timeout, TimeUnit unit)
1081            throws InterruptedException {
1082 <          if (xfer(e, true, TIMEOUT, unit.toNanos(timeout)) == null)
1082 >          if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null)
1083                return true;
1084            if (!Thread.interrupted())
1085                return false;
1087        }
1088
1089        public E take() throws InterruptedException {
1090 <          Object e = xfer(null, false, SYNC, 0);
1090 >          E e = xfer(null, false, SYNC, 0);
1091            if (e != null)
1092 <              return (E)e;
1092 >              return e;
1093            Thread.interrupted();
1094            throw new InterruptedException();
1095        }
1096
1097        public E poll(long timeout, TimeUnit unit) throws InterruptedException {
1098 <          Object e = xfer(null, false, TIMEOUT, unit.toNanos(timeout));
1098 >          E e = xfer(null, false, TIMED, unit.toNanos(timeout));
1099            if (e != null || !Thread.interrupted())
1100 <              return (E)e;
1100 >              return e;
1101            throw new InterruptedException();
1102        }
1103
1104        public E poll() {
1105 <          return (E)xfer(null, false, NOW, 0);
1105 >          return xfer(null, false, NOW, 0);
1106        }
1107
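A hedged usage sketch of the timed methods above (package as in this file; java.util.concurrent in JDK 7+). A timed tryTransfer that times out cancels and unsplices its tentatively appended node, so the element is not left enqueued:

    import java.util.concurrent.TimeUnit;
    import jsr166y.LinkedTransferQueue; // java.util.concurrent in JDK 7+

    public class TimedDemo {
        public static void main(String[] args) throws InterruptedException {
            LinkedTransferQueue<Integer> q = new LinkedTransferQueue<Integer>();

            // Timed poll on an empty queue: waits ~50ms, then returns null.
            Integer r = q.poll(50, TimeUnit.MILLISECONDS);
            System.out.println("poll -> " + r);                // null

            // Timed tryTransfer with no waiting consumer: returns false on
            // timeout, and the tentatively appended element is removed again.
            boolean handedOff = q.tryTransfer(42, 50, TimeUnit.MILLISECONDS);
            System.out.println("tryTransfer -> " + handedOff); // false
            System.out.println("isEmpty -> " + q.isEmpty());   // true
        }
    }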
1108        /**
1159        }
1160
1161        public E peek() {
1162 <          return (E) firstDataItem();
1162 >          return firstDataItem();
1163        }
1164
1165        /**
1168         * @return {@code true} if this queue contains no elements
1169         */
1170        public boolean isEmpty() {
1171 <          return firstOfMode(true) == null;
1171 >          for (Node p = head; p != null; p = succ(p)) {
1172 >              if (!p.isMatched())
1173 >                  return !p.isData;
1174 >          }
1175 >          return true;
1176        }
1177
1178        public boolean hasWaitingConsumer() {
1215        }
1216
1217        /**
1218 +       * Returns {@code true} if this queue contains the specified element.
1219 +       * More formally, returns {@code true} if and only if this queue contains
1220 +       * at least one element {@code e} such that {@code o.equals(e)}.
1221 +       *
1222 +       * @param o object to be checked for containment in this queue
1223 +       * @return {@code true} if this queue contains the specified element
1224 +       */
1225 +      public boolean contains(Object o) {
1226 +          if (o == null) return false;
1227 +          for (Node p = head; p != null; p = succ(p)) {
1228 +              Object item = p.item;
1229 +              if (p.isData) {
1230 +                  if (item != null && item != p && o.equals(item))
1231 +                      return true;
1232 +              }
1233 +              else if (item == null)
1234 +                  break;
1235 +          }
1236 +          return false;
1237 +      }
1238 +
1239 +      /**
1240         * Always returns {@code Integer.MAX_VALUE} because a
1241         * {@code LinkedTransferQueue} is not capacity constrained.
1242         *
1281        }
1282    }
1283
1161 -
1284        // Unsafe mechanics
1285
1286        private static final sun.misc.Unsafe UNSAFE = getUnsafe();
1288            objectFieldOffset(UNSAFE, "head", LinkedTransferQueue.class);
1289        private static final long tailOffset =
1290            objectFieldOffset(UNSAFE, "tail", LinkedTransferQueue.class);
1291 <      private static final long cleanMeOffset =
1292 <          objectFieldOffset(UNSAFE, "cleanMe", LinkedTransferQueue.class);
1291 >      private static final long sweepVotesOffset =
1292 >          objectFieldOffset(UNSAFE, "sweepVotes", LinkedTransferQueue.class);
1293
1294        static long objectFieldOffset(sun.misc.Unsafe UNSAFE,
1295                                      String field, Class<?> klazz) {
1303        }
1304        }
1305
1306 <      private static sun.misc.Unsafe getUnsafe() {
1306 >      /**
1307 >       * Returns a sun.misc.Unsafe.  Suitable for use in a 3rd party package.
1308 >       * Replace with a simple call to Unsafe.getUnsafe when integrating
1309 >       * into a jdk.
1310 >       *
1311 >       * @return a sun.misc.Unsafe
1312 >       */
1313 >      static sun.misc.Unsafe getUnsafe() {
1314            try {
1315                return sun.misc.Unsafe.getUnsafe();
1316            } catch (SecurityException se) {