/*
 * Written by Doug Lea with assistance from members of JCP JSR-166
 * Expert Group and released to the public domain, as explained at
 * http://creativecommons.org/licenses/publicdomain
 */

package jsr166y;

import java.util.concurrent.*;

import java.util.AbstractQueue;
import java.util.Collection;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Queue;
import java.util.concurrent.locks.LockSupport;

/**
 * An unbounded {@link TransferQueue} based on linked nodes.
 * This queue orders elements FIFO (first-in-first-out) with respect
 * to any given producer.  The <em>head</em> of the queue is that
 * element that has been on the queue the longest time for some
 * producer.  The <em>tail</em> of the queue is that element that has
 * been on the queue the shortest time for some producer.
 *
 * <p>Beware that, unlike in most collections, the {@code size}
 * method is <em>NOT</em> a constant-time operation. Because of the
 * asynchronous nature of these queues, determining the current number
 * of elements requires a traversal of the elements.
 *
 * <p>This class and its iterator implement all of the
 * <em>optional</em> methods of the {@link Collection} and {@link
 * Iterator} interfaces.
 *
 * <p>Memory consistency effects: As with other concurrent
 * collections, actions in a thread prior to placing an object into a
 * {@code LinkedTransferQueue}
 * <a href="package-summary.html#MemoryVisibility"><i>happen-before</i></a>
 * actions subsequent to the access or removal of that element from
 * the {@code LinkedTransferQueue} in another thread.
 *
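 * <p>For illustration only (this example is an editorial addition, not
 * part of the original documentation), a producer can hand an element
 * directly to a consumer with {@link #transfer transfer}, blocking until
 * some consumer receives it:
 *
 * <pre> {@code
 * final LinkedTransferQueue<String> queue = new LinkedTransferQueue<String>();
 *
 * new Thread(new Runnable() {          // consumer
 *     public void run() {
 *         try {
 *             String s = queue.take(); // receives "hello"
 *         } catch (InterruptedException ignore) { }
 *     }
 * }).start();
 *
 * queue.transfer("hello");             // returns only after the take
 * }</pre>
 *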
 * <p>This class is a member of the
 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
 * Java Collections Framework</a>.
 *
 * @since 1.7
 * @author Doug Lea
 * @param <E> the type of elements held in this collection
 */
public class LinkedTransferQueue<E> extends AbstractQueue<E>
    implements TransferQueue<E>, java.io.Serializable {
    private static final long serialVersionUID = -3223113410248163686L;

    /*
     * *** Overview of Dual Queues with Slack ***
     *
     * Dual Queues, introduced by Scherer and Scott
     * (http://www.cs.rice.edu/~wns1/papers/2004-DISC-DDS.pdf) are
     * (linked) queues in which nodes may represent either data or
     * requests.  When a thread tries to enqueue a data node, but
     * encounters a request node, it instead "matches" and removes it;
     * and vice versa for enqueuing requests.  Blocking Dual Queues
     * arrange that threads enqueuing unmatched requests block until
     * other threads provide the match.  Dual Synchronous Queues (see
     * Scherer, Lea, & Scott
     * http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf)
     * additionally arrange that threads enqueuing unmatched data also
     * block.  Dual Transfer Queues support all of these modes, as
     * dictated by callers.
     *
     * A FIFO dual queue may be implemented using a variation of the
     * Michael & Scott (M&S) lock-free queue algorithm
     * (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf).
     * It maintains two pointer fields, "head", pointing to a
     * (matched) node that in turn points to the first actual
     * (unmatched) queue node (or null if empty); and "tail" that
     * points to the last node on the queue (or again null if
     * empty).  For example, here is a possible queue with four data
     * elements:
     *
     *  head                tail
     *    |                   |
     *    v                   v
     *    M -> U -> U -> U -> U
     *
     * The M&S queue algorithm is known to be prone to scalability and
     * overhead limitations when maintaining (via CAS) these head and
     * tail pointers. This has led to the development of
     * contention-reducing variants such as elimination arrays (see
     * Moir et al http://portal.acm.org/citation.cfm?id=1074013) and
     * optimistic back pointers (see Ladan-Mozes & Shavit
     * http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf).
     * However, the nature of dual queues enables a simpler tactic for
     * improving M&S-style implementations when dual-ness is needed.
     *
     * In a dual queue, each node must atomically maintain its match
     * status. While there are other possible variants, we implement
     * this here as: for a data-mode node, matching entails CASing an
     * "item" field from a non-null data value to null upon match, and
     * vice-versa for request nodes, CASing from null to a data
     * value.  (Note that the linearization properties of this style of
     * queue are easy to verify -- elements are made available by
     * linking, and unavailable by matching.) Compared to plain M&S
     * queues, this property of dual queues requires one additional
     * successful atomic operation per enq/deq pair. But it also
     * enables lower cost variants of queue maintenance mechanics. (A
     * variation of this idea applies even for non-dual queues that
     * support deletion of embedded elements, such as
     * j.u.c.ConcurrentLinkedQueue.)
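     *
     * As an illustrative sketch only (the real logic lives in xfer and
     * Node.tryMatchData below), matching a data node p from a consumer
     * amounts to:
     *
     *   Object item = p.item;
     *   if (item != null && item != p && p.casItem(item, null)) {
     *       LockSupport.unpark(p.waiter); // release a blocked producer, if any
     *       return item;                  // item is the transferred element
     *   }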
     *
     * Once a node is matched, its item can never again change.  We
     * may thus arrange that the linked list of them contains a prefix
     * of zero or more matched nodes, followed by a suffix of zero or
     * more unmatched nodes. (Note that we allow both the prefix and
     * suffix to be zero length, which in turn means that we do not
     * use a dummy header.)  If we were not concerned with either time
     * or space efficiency, we could correctly perform enqueue and
     * dequeue operations by traversing from a pointer to the initial
     * node; CASing the item of the first unmatched node on match and
     * CASing the next field of the trailing node on appends.  While
     * this would be a terrible idea in itself, it does have the
     * benefit of not requiring ANY atomic updates on head/tail
     * fields.
     *
     * We introduce here an approach that lies between the extremes of
     * never versus always updating queue (head and tail) pointers
     * that reflects the tradeoff of sometimes requiring extra traversal
     * steps to locate the first and/or last unmatched nodes, versus
     * the reduced overhead and contention of fewer updates to queue
     * pointers. For example, a possible snapshot of a queue is:
     *
     *  head                     tail
     *    |                        |
     *    v                        v
     *    M -> M -> U -> U -> U -> U
     *
     * The best value for this "slack" (the targeted maximum distance
     * between the value of "head" and the first unmatched node, and
     * similarly for "tail") is an empirical matter. We have found
     * that using very small constants in the range of 1-3 work best
     * over a range of platforms. Larger values introduce increasing
     * costs of cache misses and risks of long traversal chains.
     *
     * Dual queues with slack differ from plain M&S dual queues by
     * virtue of only sometimes updating head or tail pointers when
     * matching, appending, or even traversing nodes; in order to
     * maintain a targeted slack.  The idea of "sometimes" may be
     * operationalized in several ways. The simplest is to use a
     * per-operation counter incremented on each traversal step, and
     * to try (via CAS) to update the associated queue pointer
     * whenever the count exceeds a threshold. Another, that requires
     * more overhead, is to use random number generators to update
     * with a given probability per traversal step.
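     *
     * As a sketch of the simplest (counter-based) variant -- not the
     * exact code used below, and with a hypothetical SLACK constant --
     * a traversal might advance head only after skipping more than
     * SLACK matched nodes:
     *
     *   int steps = 0;
     *   for (Node h = head, p = h; p != null && p.isMatched(); ) {
     *       Node n = p.next;
     *       if (n == null) break;
     *       p = n;
     *       if (++steps > SLACK) { casHead(h, p); break; }
     *   }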
     *
     * In any strategy along these lines, because CASes updating
     * fields may fail, the actual slack may exceed targeted
     * slack. However, they may be retried at any time to maintain
     * targets.  Even when using very small slack values, this
     * approach works well for dual queues because it allows all
     * operations up to the point of matching or appending an item
     * (hence potentially releasing another thread) to be read-only,
     * thus not introducing any further contention. As described
     * below, we implement this by performing slack maintenance
     * retries only after these points.
     *
     * As an accompaniment to such techniques, traversal overhead can
     * be further reduced without increasing contention of head
     * pointer updates.  During traversals, threads may sometimes
     * shortcut the "next" link path from the current "head" node to
     * be closer to the currently known first unmatched node. Again,
     * this may be triggered with using thresholds or randomization.
     *
     * These ideas must be further extended to avoid unbounded amounts
     * of costly-to-reclaim garbage caused by the sequential "next"
     * links of nodes starting at old forgotten head nodes: As first
     * described in detail by Boehm
     * (http://portal.acm.org/citation.cfm?doid=503272.503282) if a GC
     * delays noticing that any arbitrarily old node has become
     * garbage, all newer dead nodes will also be unreclaimed.
     * (Similar issues arise in non-GC environments.)  To cope with
     * this in our implementation, upon CASing to advance the head
     * pointer, we set the "next" link of the previous head to point
     * only to itself; thus limiting the length of connected dead lists.
     * (We also take similar care to wipe out possibly garbage
     * retaining values held in other Node fields.)  However, doing so
     * adds some further complexity to traversal: If any "next"
     * pointer links to itself, it indicates that the current thread
     * has lagged behind a head-update, and so the traversal must
     * continue from the "head".  Traversals trying to find the
     * current tail starting from "tail" may also encounter
     * self-links, in which case they also continue at "head".
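     *
     * Concretely, the traversal idiom used throughout this class to
     * tolerate such self-links is:
     *
     *   Node n = p.next;
     *   p = (n != p) ? n : head;   // restart at head if p is off-list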
     *
     * It is tempting in a slack-based scheme to not even use CAS for
     * updates (similarly to Ladan-Mozes & Shavit). However, this
     * cannot be done for head updates under the above link-forgetting
     * mechanics because an update may leave head at a detached node.
     * And while direct writes are possible for tail updates, they
     * increase the risk of long retraversals, and hence long garbage
     * chains, which can be much more costly than is worthwhile
     * considering that the cost difference of performing a CAS vs
     * write is smaller when they are not triggered on each operation
     * (especially considering that writes and CASes equally require
     * additional GC bookkeeping ("write barriers") that are sometimes
     * more costly than the writes themselves because of contention).
     *
     * Removal of internal nodes (due to timed out or interrupted
     * waits, or calls to remove or Iterator.remove) uses a scheme
     * roughly similar to that in Scherer, Lea, and Scott
     * SynchronousQueue. Given a predecessor, we can unsplice any node
     * except the (actual) tail of the queue. To avoid build-up of
     * cancelled trailing nodes, upon a request to remove a trailing
     * node, it is placed in field "cleanMe" to be unspliced later.
     *
     * *** Overview of implementation ***
     *
     * We use a threshold-based approach to updates, with a target
     * slack of two.  The slack value is hard-wired: a path greater
     * than one is naturally implemented by checking equality of
     * traversal pointers except when the list has only one element,
     * in which case we keep max slack at one. Avoiding tracking
     * explicit counts across situations slightly simplifies an
     * already-messy implementation. Using randomization would
     * probably work better if there were a low-quality dirt-cheap
     * per-thread one available, but even ThreadLocalRandom is too
     * heavy for these purposes.
     *
     * With such a small slack value, path short-circuiting is rarely
     * worthwhile. However, it is used (in awaitMatch) immediately
     * before a waiting thread starts to block, as a final bit of
     * helping at a point when contention with others is extremely
     * unlikely (since if other threads that could release it are
     * operating, then the current thread wouldn't be blocking).
     *
     * All enqueue/dequeue operations are handled by the single method
     * "xfer" with parameters indicating whether to act as some form
     * of offer, put, poll, take, or transfer (each possibly with
     * timeout). The relative complexity of using one monolithic
     * method is outweighed by the code bulk and maintenance problems
     * of using nine separate methods.
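     *
     * For reference, the public methods map onto xfer roughly as
     * follows (e / haveData / how / nanos):
     *
     *   add, offer, put             xfer(e,    true,  ASYNC,   0)
     *   tryTransfer(e)              xfer(e,    true,  NOW,     0)
     *   transfer(e)                 xfer(e,    true,  SYNC,    0)
     *   tryTransfer(e, timeout)     xfer(e,    true,  TIMEOUT, nanos)
     *   poll()                      xfer(null, false, NOW,     0)
     *   take()                      xfer(null, false, SYNC,    0)
     *   poll(timeout)               xfer(null, false, TIMEOUT, nanos)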
     *
     * Operation consists of up to three phases. The first is
     * implemented within method xfer, the second in tryAppend, and
     * the third in method awaitMatch.
     *
     * 1. Try to match an existing node
     *
     *    Starting at head, skip already-matched nodes until finding
     *    an unmatched node of opposite mode, if one exists, in which
     *    case matching it and returning, also if necessary updating
     *    head to one past the matched node (or the node itself if the
     *    list has no other unmatched nodes). If the CAS misses, then
     *    a retry loops until the slack is at most two.  Traversals
     *    also check if the initial head is now off-list, in which
     *    case they start at the new head.
     *
     *    If no candidates are found and the call was untimed
     *    poll/offer, (argument "how" is NOW) return.
     *
     * 2. Try to append a new node (method tryAppend)
     *
     *    Starting at current tail pointer, try to append a new node
     *    to the list (or if head was null, establish the first
     *    node). Nodes can be appended only if their predecessors are
     *    either already matched or are of the same mode. If we detect
     *    otherwise, then a new node with opposite mode must have been
     *    appended during traversal, so must restart at phase 1. The
     *    traversal and update steps are otherwise similar to phase 1:
     *    Retrying upon CAS misses and checking for staleness.  In
     *    particular, if a self-link is encountered, then we can
     *    safely jump to a node on the list by continuing the
     *    traversal at current head.
     *
     *    On successful append, if the call was ASYNC, return.
     *
     * 3. Await match or cancellation (method awaitMatch)
     *
     *    Wait for another thread to match node; instead cancelling if
     *    current thread was interrupted or the wait timed out. On
     *    multiprocessors, we use front-of-queue spinning: If a node
     *    appears to be the first unmatched node in the queue, it
     *    spins a bit before blocking. In either case, before blocking
     *    it tries to unsplice any nodes between the current "head"
     *    and the first unmatched node.
     *
     *    Front-of-queue spinning vastly improves performance of
     *    heavily contended queues. And so long as it is relatively
     *    brief and "quiet", spinning does not much impact performance
     *    of less-contended queues.  During spins threads check their
     *    interrupt status and generate a thread-local random number
     *    to decide to occasionally perform a Thread.yield. While
     *    yield has underdefined specs, we assume that it might help,
     *    and will not hurt, in limiting the impact of spinning on
     *    busy systems.  We also use much smaller (1/4) spins for
     *    nodes that are not known to be front but whose predecessors
     *    have not blocked -- these "chained" spins avoid artifacts of
     *    front-of-queue rules which otherwise lead to alternating
     *    nodes spinning vs blocking. Further, front threads that
     *    represent phase changes (from data to request node or vice
     *    versa) compared to their predecessors receive additional
     *    spins, reflecting the longer code path lengths necessary to
     *    release them under contention.
     */

    /** True if on multiprocessor */
    private static final boolean MP =
        Runtime.getRuntime().availableProcessors() > 1;

    /**
     * The number of times to spin (with on average one randomly
     * interspersed call to Thread.yield) on multiprocessor before
     * blocking when a node is apparently the first waiter in the
     * queue.  See above for explanation. Must be a power of two. The
     * value is empirically derived -- it works pretty well across a
     * variety of processors, numbers of CPUs, and OSes.
     */
    private static final int FRONT_SPINS   = 1 << 7;

    /**
     * The number of times to spin before blocking when a node is
     * preceded by another node that is apparently spinning.
     */
    private static final int CHAINED_SPINS = FRONT_SPINS >>> 2;

    /**
     * Queue nodes. Uses Object, not E, for items to allow forgetting
     * them after use.  Relies heavily on Unsafe mechanics to minimize
     * unnecessary ordering constraints: Writes that intrinsically
     * precede or follow CASes use simple relaxed forms.  Other
     * cleanups use releasing/lazy writes.
     */
    static final class Node {
        final boolean isData;   // false if this is a request node
        volatile Object item;   // initially non-null if isData; CASed to match
        volatile Node next;
        volatile Thread waiter; // null until waiting

        // CAS methods for fields
        final boolean casNext(Node cmp, Node val) {
            return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
        }

        final boolean casItem(Object cmp, Object val) {
            return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
        }

        /**
         * Creates a new node. Uses relaxed write because item can only
         * be seen if followed by CAS.
         */
        Node(Object item, boolean isData) {
            UNSAFE.putObject(this, itemOffset, item); // relaxed write
            this.isData = isData;
        }

        /**
         * Links node to itself to avoid garbage retention.  Called
         * only after CASing head field, so uses relaxed write.
         */
        final void forgetNext() {
            UNSAFE.putObject(this, nextOffset, this);
        }

        /**
         * Sets item to self (using a releasing/lazy write) and waiter
         * to null, to avoid garbage retention after extracting or
         * cancelling.
         */
        final void forgetContents() {
            UNSAFE.putOrderedObject(this, itemOffset, this);
            UNSAFE.putOrderedObject(this, waiterOffset, null);
        }

        /**
         * Returns true if this node has been matched, including the
         * case of artificial matches due to cancellation.
         */
        final boolean isMatched() {
            Object x = item;
            return x == this || (x != null) != isData;
        }

        /**
         * Returns true if a node with the given mode cannot be
         * appended to this node because this node is unmatched and
         * has opposite data mode.
         */
        final boolean cannotPrecede(boolean haveData) {
            boolean d = isData;
            Object x;
            return d != haveData && (x = item) != this && (x != null) == d;
        }

        /**
         * Tries to artificially match a data node -- used by remove.
         */
        final boolean tryMatchData() {
            Object x = item;
            if (x != null && x != this && casItem(x, null)) {
                LockSupport.unpark(waiter);
                return true;
            }
            return false;
        }

        // Unsafe mechanics
        private static final sun.misc.Unsafe UNSAFE = getUnsafe();
        private static final long nextOffset =
            objectFieldOffset(UNSAFE, "next", Node.class);
        private static final long itemOffset =
            objectFieldOffset(UNSAFE, "item", Node.class);
        private static final long waiterOffset =
            objectFieldOffset(UNSAFE, "waiter", Node.class);

        private static final long serialVersionUID = -3375979862319811754L;
    }

    /** head of the queue; null until first enqueue */
    private transient volatile Node head;

    /** predecessor of dangling unspliceable node */
    private transient volatile Node cleanMe; // decl here to reduce contention

    /** tail of the queue; null until first append */
    private transient volatile Node tail;

    // CAS methods for fields
    private boolean casTail(Node cmp, Node val) {
        return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val);
    }

    private boolean casHead(Node cmp, Node val) {
        return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val);
    }

    private boolean casCleanMe(Node cmp, Node val) {
        return UNSAFE.compareAndSwapObject(this, cleanMeOffset, cmp, val);
    }

    /*
     * Possible values for "how" argument in xfer method. Beware that
     * the order of assigned numerical values matters.
     */
    private static final int NOW     = 0; // for untimed poll, tryTransfer
    private static final int ASYNC   = 1; // for offer, put, add
    private static final int SYNC    = 2; // for transfer, take
    private static final int TIMEOUT = 3; // for timed poll, tryTransfer

    /**
     * Implements all queuing methods. See above for explanation.
     *
     * @param e the item or null for take
     * @param haveData true if this is a put, else a take
     * @param how NOW, ASYNC, SYNC, or TIMEOUT
     * @param nanos timeout in nanoseconds, used only if mode is TIMEOUT
     * @return an item if matched, else e
     * @throws NullPointerException if haveData mode but e is null
     */
    private Object xfer(Object e, boolean haveData, int how, long nanos) {
        if (haveData && (e == null))
            throw new NullPointerException();
        Node s = null;                        // the node to append, if needed

        retry: for (;;) {                     // restart on append race

            for (Node h = head, p = h; p != null;) { // find & match first node
                boolean isData = p.isData;
                Object item = p.item;
                if (item != p && (item != null) == isData) { // unmatched
                    if (isData == haveData)   // can't match
                        break;
                    if (p.casItem(item, e)) { // match
                        Thread w = p.waiter;
                        while (p != h) {      // update head
                            Node n = p.next;  // by 2 unless singleton
                            if (n != null)
                                p = n;
                            if (head == h && casHead(h, p)) {
                                h.forgetNext();
                                break;
                            }                 // advance and retry
                            if ((h = head) == null ||
                                (p = h.next) == null || !p.isMatched())
                                break;        // unless slack < 2
                        }
                        LockSupport.unpark(w);
                        return item;
                    }
                }
                Node n = p.next;
                p = (p != n) ? n : (h = head); // Use head if p offlist
            }

            if (how >= ASYNC) {               // No matches available
                if (s == null)
                    s = new Node(e, haveData);
                Node pred = tryAppend(s, haveData);
                if (pred == null)
                    continue retry;           // lost race vs opposite mode
                if (how >= SYNC)
                    return awaitMatch(pred, s, e, how, nanos);
            }
            return e; // not waiting
        }
    }

    /**
     * Tries to append node s as tail.
     *
     * @param s the node to append
     * @param haveData true if appending in data mode
     * @return null on failure due to losing race with append in
     * different mode, else s's predecessor, or s itself if no
     * predecessor
     */
    private Node tryAppend(Node s, boolean haveData) {
        for (Node t = tail, p = t;;) {        // move p to actual tail and append
            Node n, u;                        // temps for reads of next & tail
            if (p == null && (p = head) == null) {
                if (casHead(null, s))
                    return s;                 // initialize
            }
            else if (p.cannotPrecede(haveData))
                return null;                  // lost race vs opposite mode
            else if ((n = p.next) != null)    // not tail; keep traversing
                p = p != t && t != (u = tail) ? (t = u) : // stale tail
                    (p != n) ? n : null;      // restart if off list
            else if (!p.casNext(null, s))
                p = p.next;                   // re-read on CAS failure
            else {
                if (p != t) {                 // update if slack now >= 2
                    while ((tail != t || !casTail(t, s)) &&
                           (t = tail)   != null &&
                           (s = t.next) != null && // advance and retry
                           (s = s.next) != null && s != t);
                }
                return p;
            }
        }
    }

    /**
     * Spins/yields/blocks until node s is matched or caller gives up.
     *
     * @param pred the predecessor of s, or s itself if it has no
     * predecessor, or null if none
     * @param s the waiting node
     * @param e the comparison value for checking match
     * @param how either SYNC or TIMEOUT
     * @param nanos timeout value
     * @return matched item, or e if unmatched on interrupt or timeout
     */
    private Object awaitMatch(Node pred, Node s, Object e,
                              int how, long nanos) {
        long lastTime = (how == TIMEOUT) ? System.nanoTime() : 0L;
        Thread w = Thread.currentThread();
        int spins = -1; // initialized after first item and cancel checks
        ThreadLocalRandom randomYields = null; // bound if needed

        for (;;) {
            Object item = s.item;
            if (item != e) {                  // matched
                s.forgetContents();           // avoid garbage
                return item;
            }
            if ((w.isInterrupted() || (how == TIMEOUT && nanos <= 0)) &&
                s.casItem(e, s)) {            // cancel
                unsplice(pred, s);
                return e;
            }

            if (spins < 0) {                  // establish spins at/near front
                if ((spins = spinsFor(pred, s.isData)) > 0)
                    randomYields = ThreadLocalRandom.current();
            }
            else if (spins > 0) {             // spin, occasionally yield
                if (randomYields.nextInt(FRONT_SPINS) == 0)
                    Thread.yield();
                --spins;
            }
            else if (s.waiter == null) {
                shortenHeadPath();            // reduce slack before blocking
                s.waiter = w;                 // request unpark
            }
            else if (how == TIMEOUT) {
                long now = System.nanoTime();
                if ((nanos -= now - lastTime) > 0)
                    LockSupport.parkNanos(this, nanos);
                lastTime = now;
            }
            else {
                LockSupport.park(this);
                spins = -1;                   // spin if front upon wakeup
            }
        }
    }

    /**
     * Returns spin/yield value for a node with given predecessor and
     * data mode. See above for explanation.
     */
    private static int spinsFor(Node pred, boolean haveData) {
        if (MP && pred != null) {
            boolean predData = pred.isData;
            if (predData != haveData)         // front and phase change
                return FRONT_SPINS + (FRONT_SPINS >>> 1);
            if (predData != (pred.item != null)) // probably at front
                return FRONT_SPINS;
            if (pred.waiter == null)          // pred apparently spinning
                return CHAINED_SPINS;
        }
        return 0;
    }

    /**
     * Tries (once) to unsplice nodes between head and first unmatched
     * or trailing node; failing on contention.
     */
    private void shortenHeadPath() {
        Node h, hn, p, q;
        if ((p = h = head) != null && h.isMatched() &&
            (q = hn = h.next) != null) {
            Node n;
            while ((n = q.next) != q) {
                if (n == null || !q.isMatched()) {
                    if (hn != q && h.next == hn)
                        h.casNext(hn, q);
                    break;
                }
                p = q;
                q = n;
            }
        }
    }

    /* -------------- Traversal methods -------------- */

    /**
     * Returns the first unmatched node of the given mode, or null if
     * none.  Used by methods isEmpty, hasWaitingConsumer.
     */
    private Node firstOfMode(boolean data) {
        for (Node p = head; p != null; ) {
            if (!p.isMatched())
                return (p.isData == data) ? p : null;
            Node n = p.next;
            p = (n != p) ? n : head;
        }
        return null;
    }

    /**
     * Returns the item in the first unmatched node with isData; or
     * null if none. Used by peek.
     */
    private Object firstDataItem() {
        for (Node p = head; p != null; ) {
            boolean isData = p.isData;
            Object item = p.item;
            if (item != p && (item != null) == isData)
                return isData ? item : null;
            Node n = p.next;
            p = (n != p) ? n : head;
        }
        return null;
    }

    /**
     * Traverses and counts unmatched nodes of the given mode.
     * Used by methods size and getWaitingConsumerCount.
     */
    private int countOfMode(boolean data) {
        int count = 0;
        for (Node p = head; p != null; ) {
            if (!p.isMatched()) {
                if (p.isData != data)
                    return 0;
                if (++count == Integer.MAX_VALUE) // saturated
                    break;
            }
            Node n = p.next;
            if (n != p)
                p = n;
            else {
                count = 0;
                p = head;
            }
        }
        return count;
    }

    final class Itr implements Iterator<E> {
        private Node nextNode;   // next node to return item for
        private Object nextItem; // the corresponding item
        private Node lastRet;    // last returned node, to support remove

        /**
         * Moves to next node after prev, or first node if prev null.
         */
        private void advance(Node prev) {
            lastRet = prev;
            Node p;
            if (prev == null || (p = prev.next) == prev)
                p = head;
            while (p != null) {
                Object item = p.item;
                if (p.isData) {
                    if (item != null && item != p) {
                        nextItem = item;
                        nextNode = p;
                        return;
                    }
                }
                else if (item == null)
                    break;
                Node n = p.next;
                p = (n != p) ? n : head;
            }
            nextNode = null;
        }

        Itr() {
            advance(null);
        }

        public final boolean hasNext() {
            return nextNode != null;
        }

        public final E next() {
            Node p = nextNode;
            if (p == null) throw new NoSuchElementException();
            Object e = nextItem;
            advance(p);
            return (E) e;
        }

        public final void remove() {
            Node p = lastRet;
            if (p == null) throw new IllegalStateException();
            lastRet = null;
            findAndRemoveNode(p);
        }
    }

    /* -------------- Removal methods -------------- */

    /**
     * Unsplices (now or later) the given deleted/cancelled node with
     * the given predecessor.
     *
     * @param pred predecessor of node to be unspliced
     * @param s the node to be unspliced
     */
    private void unsplice(Node pred, Node s) {
        s.forgetContents(); // clear unneeded fields
        /*
         * At any given time, exactly one node on list cannot be
         * deleted -- the last inserted node. To accommodate this, if
         * we cannot delete s, we save its predecessor as "cleanMe",
         * processing the previously saved version first. Because only
         * one node in the list can have a null next, at least one of
         * node s or the node previously saved can always be
         * processed, so this always terminates.
         */
        if (pred != null && pred != s) {
            while (pred.next == s) {
                Node oldpred = (cleanMe == null) ? null : reclean();
                Node n = s.next;
                if (n != null) {
                    if (n != s)
                        pred.casNext(s, n);
                    break;
                }
                if (oldpred == pred ||          // already saved
                    (oldpred == null && casCleanMe(null, pred)))
                    break;                      // postpone cleaning
            }
        }
    }

    /**
     * Tries to unsplice the deleted/cancelled node held in cleanMe
     * that was previously uncleanable because it was at tail.
     *
     * @return current cleanMe node (or null)
     */
    private Node reclean() {
        /*
         * cleanMe is, or at one time was, predecessor of a cancelled
         * node s that was the tail so could not be unspliced.  If it
         * is no longer the tail, try to unsplice if necessary and
         * make cleanMe slot available.  This differs from similar
         * code in unsplice() because we must check that pred still
         * points to a matched node that can be unspliced -- if not,
         * we can (must) clear cleanMe without unsplicing.  This can
         * loop only due to contention.
         */
        Node pred;
        while ((pred = cleanMe) != null) {
            Node s = pred.next;
            Node n;
            if (s == null || s == pred || !s.isMatched())
                casCleanMe(pred, null); // already gone
            else if ((n = s.next) != null) {
                if (n != s)
                    pred.casNext(s, n);
                casCleanMe(pred, null);
            }
            else
                break;
        }
        return pred;
    }

    /**
     * Main implementation of Iterator.remove(). Finds
     * and unsplices the given node.
     */
    final void findAndRemoveNode(Node s) {
        if (s.tryMatchData()) {
            Node pred = null;
            Node p = head;
            while (p != null) {
                if (p == s) {
                    unsplice(pred, p);
                    break;
                }
                if (!p.isData && !p.isMatched())
                    break;
                pred = p;
                if ((p = p.next) == pred) { // stale
                    pred = null;
                    p = head;
                }
            }
        }
    }

    /**
     * Main implementation of remove(Object).
     */
    private boolean findAndRemove(Object e) {
        if (e != null) {
            Node pred = null;
            Node p = head;
            while (p != null) {
                Object item = p.item;
                if (p.isData) {
                    if (item != null && item != p && e.equals(item) &&
                        p.tryMatchData()) {
                        unsplice(pred, p);
                        return true;
                    }
                }
                else if (item == null)
                    break;
                pred = p;
                if ((p = p.next) == pred) {
                    pred = null;
                    p = head;
                }
            }
        }
        return false;
    }


    /**
     * Creates an initially empty {@code LinkedTransferQueue}.
     */
    public LinkedTransferQueue() {
    }

    /**
     * Creates a {@code LinkedTransferQueue}
     * initially containing the elements of the given collection,
     * added in traversal order of the collection's iterator.
     *
     * @param c the collection of elements to initially contain
     * @throws NullPointerException if the specified collection or any
     *         of its elements are null
     */
    public LinkedTransferQueue(Collection<? extends E> c) {
        this();
        addAll(c);
    }

    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never block.
     *
     * @throws NullPointerException if the specified element is null
     */
    public void put(E e) {
        xfer(e, true, ASYNC, 0);
    }

    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never block or
     * return {@code false}.
     *
     * @return {@code true} (as specified by
     *  {@link BlockingQueue#offer(Object,long,TimeUnit) BlockingQueue.offer})
     * @throws NullPointerException if the specified element is null
     */
    public boolean offer(E e, long timeout, TimeUnit unit) {
        xfer(e, true, ASYNC, 0);
        return true;
    }

    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never return {@code false}.
     *
     * @return {@code true} (as specified by
     *         {@link BlockingQueue#offer(Object) BlockingQueue.offer})
     * @throws NullPointerException if the specified element is null
     */
    public boolean offer(E e) {
        xfer(e, true, ASYNC, 0);
        return true;
    }

    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never throw
     * {@link IllegalStateException} or return {@code false}.
     *
     * @return {@code true} (as specified by {@link Collection#add})
     * @throws NullPointerException if the specified element is null
     */
    public boolean add(E e) {
        xfer(e, true, ASYNC, 0);
        return true;
    }

    /**
     * Transfers the element to a waiting consumer immediately, if possible.
     *
     * <p>More precisely, transfers the specified element immediately
     * if there exists a consumer already waiting to receive it (in
     * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
     * otherwise returning {@code false} without enqueuing the element.
     *
     * @throws NullPointerException if the specified element is null
     */
    public boolean tryTransfer(E e) {
        return xfer(e, true, NOW, 0) == null;
    }

    /**
     * Transfers the element to a consumer, waiting if necessary to do so.
     *
     * <p>More precisely, transfers the specified element immediately
     * if there exists a consumer already waiting to receive it (in
     * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
     * else inserts the specified element at the tail of this queue
     * and waits until the element is received by a consumer.
     *
     * @throws NullPointerException if the specified element is null
     */
    public void transfer(E e) throws InterruptedException {
        if (xfer(e, true, SYNC, 0) != null) {
            Thread.interrupted(); // failure possible only due to interrupt
            throw new InterruptedException();
        }
    }

    /**
     * Transfers the element to a consumer if it is possible to do so
     * before the timeout elapses.
     *
     * <p>More precisely, transfers the specified element immediately
     * if there exists a consumer already waiting to receive it (in
     * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
     * else inserts the specified element at the tail of this queue
     * and waits until the element is received by a consumer,
     * returning {@code false} if the specified wait time elapses
     * before the element can be transferred.
     *
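     * <p>For example (an illustrative sketch; {@code handleRejected} is a
     * hypothetical fallback, not part of this class):
     *
     * <pre> {@code
     * if (!queue.tryTransfer(task, 1, TimeUnit.SECONDS))
     *     handleRejected(task); // no consumer arrived within one second
     * }</pre>
     *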
     * @throws NullPointerException if the specified element is null
     */
    public boolean tryTransfer(E e, long timeout, TimeUnit unit)
        throws InterruptedException {
        if (xfer(e, true, TIMEOUT, unit.toNanos(timeout)) == null)
            return true;
        if (!Thread.interrupted())
            return false;
        throw new InterruptedException();
    }

    public E take() throws InterruptedException {
        Object e = xfer(null, false, SYNC, 0);
        if (e != null)
            return (E) e;
        Thread.interrupted();
        throw new InterruptedException();
    }

    public E poll(long timeout, TimeUnit unit) throws InterruptedException {
        Object e = xfer(null, false, TIMEOUT, unit.toNanos(timeout));
        if (e != null || !Thread.interrupted())
            return (E) e;
        throw new InterruptedException();
    }

    public E poll() {
        return (E) xfer(null, false, NOW, 0);
    }

    /**
     * @throws NullPointerException     {@inheritDoc}
     * @throws IllegalArgumentException {@inheritDoc}
     */
    public int drainTo(Collection<? super E> c) {
        if (c == null)
            throw new NullPointerException();
        if (c == this)
            throw new IllegalArgumentException();
        int n = 0;
        E e;
        while ( (e = poll()) != null) {
            c.add(e);
            ++n;
        }
        return n;
    }

    /**
     * @throws NullPointerException     {@inheritDoc}
     * @throws IllegalArgumentException {@inheritDoc}
     */
    public int drainTo(Collection<? super E> c, int maxElements) {
        if (c == null)
            throw new NullPointerException();
        if (c == this)
            throw new IllegalArgumentException();
        int n = 0;
        E e;
        while (n < maxElements && (e = poll()) != null) {
            c.add(e);
            ++n;
        }
        return n;
    }

    /**
     * Returns an iterator over the elements in this queue in proper
     * sequence, from head to tail.
     *
     * <p>The returned iterator is a "weakly consistent" iterator that
     * will never throw
     * {@link ConcurrentModificationException ConcurrentModificationException},
     * and guarantees to traverse elements as they existed upon
     * construction of the iterator, and may (but is not guaranteed
     * to) reflect any modifications subsequent to construction.
     *
     * @return an iterator over the elements in this queue in proper sequence
     */
    public Iterator<E> iterator() {
        return new Itr();
    }

    public E peek() {
        return (E) firstDataItem();
    }

    /**
     * Returns {@code true} if this queue contains no elements.
     *
     * @return {@code true} if this queue contains no elements
     */
    public boolean isEmpty() {
        return firstOfMode(true) == null;
    }

    public boolean hasWaitingConsumer() {
        return firstOfMode(false) != null;
    }

    /**
     * Returns the number of elements in this queue.  If this queue
     * contains more than {@code Integer.MAX_VALUE} elements, returns
     * {@code Integer.MAX_VALUE}.
     *
     * <p>Beware that, unlike in most collections, this method is
     * <em>NOT</em> a constant-time operation. Because of the
     * asynchronous nature of these queues, determining the current
     * number of elements requires an O(n) traversal.
     *
     * @return the number of elements in this queue
     */
    public int size() {
        return countOfMode(true);
    }

    public int getWaitingConsumerCount() {
        return countOfMode(false);
    }

    /**
     * Removes a single instance of the specified element from this queue,
     * if it is present.  More formally, removes an element {@code e} such
     * that {@code o.equals(e)}, if this queue contains one or more such
     * elements.
     * Returns {@code true} if this queue contained the specified element
     * (or equivalently, if this queue changed as a result of the call).
     *
     * @param o element to be removed from this queue, if present
     * @return {@code true} if this queue changed as a result of the call
     */
    public boolean remove(Object o) {
        return findAndRemove(o);
    }

    /**
     * Always returns {@code Integer.MAX_VALUE} because a
     * {@code LinkedTransferQueue} is not capacity constrained.
     *
     * @return {@code Integer.MAX_VALUE} (as specified by
     *         {@link BlockingQueue#remainingCapacity()})
     */
    public int remainingCapacity() {
        return Integer.MAX_VALUE;
    }

    /**
     * Saves the state to a stream (that is, serializes it).
     *
     * @serialData All of the elements (each an {@code E}) in
     * the proper order, followed by a null
     * @param s the stream
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        s.defaultWriteObject();
        for (E e : this)
            s.writeObject(e);
        // Use trailing null as sentinel
        s.writeObject(null);
    }

    /**
     * Reconstitutes the Queue instance from a stream (that is,
     * deserializes it).
     *
     * @param s the stream
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        s.defaultReadObject();
        for (;;) {
            @SuppressWarnings("unchecked") E item = (E) s.readObject();
            if (item == null)
                break;
            else
                offer(item);
        }
    }


    // Unsafe mechanics

    private static final sun.misc.Unsafe UNSAFE = getUnsafe();
    private static final long headOffset =
        objectFieldOffset(UNSAFE, "head", LinkedTransferQueue.class);
    private static final long tailOffset =
        objectFieldOffset(UNSAFE, "tail", LinkedTransferQueue.class);
    private static final long cleanMeOffset =
        objectFieldOffset(UNSAFE, "cleanMe", LinkedTransferQueue.class);

    static long objectFieldOffset(sun.misc.Unsafe UNSAFE,
                                  String field, Class<?> klazz) {
        try {
            return UNSAFE.objectFieldOffset(klazz.getDeclaredField(field));
        } catch (NoSuchFieldException e) {
            // Convert Exception to corresponding Error
            NoSuchFieldError error = new NoSuchFieldError(field);
            error.initCause(e);
            throw error;
        }
    }

    private static sun.misc.Unsafe getUnsafe() {
        try {
            return sun.misc.Unsafe.getUnsafe();
        } catch (SecurityException se) {
            try {
                return java.security.AccessController.doPrivileged
                    (new java.security
                     .PrivilegedExceptionAction<sun.misc.Unsafe>() {
                         public sun.misc.Unsafe run() throws Exception {
                             java.lang.reflect.Field f = sun.misc
                                 .Unsafe.class.getDeclaredField("theUnsafe");
                             f.setAccessible(true);
                             return (sun.misc.Unsafe) f.get(null);
                         }});
            } catch (java.security.PrivilegedActionException e) {
                throw new RuntimeException("Could not initialize intrinsics",
                                           e.getCause());
            }
        }
    }

}