29 |
|
import java.util.function.Consumer; |
30 |
|
import java.util.function.Function; |
31 |
|
import java.util.function.Predicate; |
32 |
+ |
import java.util.concurrent.atomic.LongAdder; |
33 |
|
|
34 |
|
/** |
35 |
|
* A scalable concurrent {@link ConcurrentNavigableMap} implementation. |
58 |
|
* associated map using {@code put}, {@code putIfAbsent}, or |
59 |
|
* {@code replace}, depending on exactly which effect you need.) |
60 |
|
* |
61 |
< |
* <p>Beware that, unlike in most collections, the {@code size} |
61 |
< |
* method is <em>not</em> a constant-time operation. Because of the |
62 |
< |
* asynchronous nature of these maps, determining the current number |
63 |
< |
* of elements requires a traversal of the elements, and so may report |
64 |
< |
* inaccurate results if this collection is modified during traversal. |
65 |
< |
* Additionally, the bulk operations {@code putAll}, {@code equals}, |
61 |
> |
* <p>Beware that bulk operations {@code putAll}, {@code equals}, |
62 |
|
* {@code toArray}, {@code containsValue}, and {@code clear} are |
63 |
|
* <em>not</em> guaranteed to be performed atomically. For example, an |
64 |
|
* iterator operating concurrently with a {@code putAll} operation |
125 |
|
* be slow and space-intensive using AtomicMarkedReference), nodes |
126 |
|
* use direct CAS'able next pointers. On deletion, instead of |
127 |
|
* marking a pointer, they splice in another node that can be |
128 |
< |
* thought of as standing for a marked pointer (indicating this by |
129 |
< |
* using otherwise impossible field values). Using plain nodes |
130 |
< |
* acts roughly like "boxed" implementations of marked pointers, |
131 |
< |
* but uses new nodes only when nodes are deleted, not for every |
132 |
< |
* link. This requires less space and supports faster |
133 |
< |
* traversal. Even if marked references were better supported by |
134 |
< |
* JVMs, traversal using this technique might still be faster |
135 |
< |
* because any search need only read ahead one more node than |
136 |
< |
* otherwise required (to check for trailing marker) rather than |
137 |
< |
* unmasking mark bits or whatever on each read. |
128 |
> |
* thought of as standing for a marked pointer (see method |
129 |
> |
* unlinkNode). Using plain nodes acts roughly like "boxed" |
130 |
> |
* implementations of marked pointers, but uses new nodes only |
131 |
> |
* when nodes are deleted, not for every link. This requires less |
132 |
> |
* space and supports faster traversal. Even if marked references |
133 |
> |
* were better supported by JVMs, traversal using this technique |
134 |
> |
* might still be faster because any search need only read ahead |
135 |
> |
* one more node than otherwise required (to check for trailing |
136 |
> |
* marker) rather than unmasking mark bits or whatever on each |
137 |
> |
* read. |
138 |
|
* |
139 |
|
* This approach maintains the essential property needed in the HM |
140 |
|
* algorithm of changing the next-pointer of a deleted node so |
141 |
|
* that any other CAS of it will fail, but implements the idea by |
142 |
< |
* changing the pointer to point to a different node, not by |
143 |
< |
* marking it. While it would be possible to further squeeze |
144 |
< |
* space by defining marker nodes not to have key/value fields, it |
145 |
< |
* isn't worth the extra type-testing overhead. The deletion |
146 |
< |
* markers are rarely encountered during traversal and are |
147 |
< |
* normally quickly garbage collected. (Note that this technique |
148 |
< |
* would not work well in systems without garbage collection.) |
142 |
> |
* changing the pointer to point to a different node (with |
143 |
> |
* otherwise illegal null fields), not by marking it. While it |
144 |
> |
* would be possible to further squeeze space by defining marker |
145 |
> |
* nodes not to have key/value fields, it isn't worth the extra |
146 |
> |
* type-testing overhead. The deletion markers are rarely |
147 |
> |
* encountered during traversal, are easily detected via null |
148 |
> |
* checks that are needed anyway, and are normally quickly garbage |
149 |
> |
* collected. (Note that this technique would not work well in |
150 |
> |
* systems without garbage collection.) |
151 |
|
* |
152 |
|
* In addition to using deletion markers, the lists also use |
153 |
|
* nullness of value fields to indicate deletion, in a style |
154 |
|
* similar to typical lazy-deletion schemes. If a node's value is |
155 |
|
* null, then it is considered logically deleted and ignored even |
156 |
< |
* though it is still reachable. This maintains proper control of |
159 |
< |
* concurrent replace vs delete operations -- an attempted replace |
160 |
< |
* must fail if a delete beat it by nulling field, and a delete |
161 |
< |
* must return the last non-null value held in the field. (Note: |
162 |
< |
* Null, rather than some special marker, is used for value fields |
163 |
< |
* here because it just so happens to mesh with the Map API |
164 |
< |
* requirement that method get returns null if there is no |
165 |
< |
* mapping, which allows nodes to remain concurrently readable |
166 |
< |
* even when deleted. Using any other marker value here would be |
167 |
< |
* messy at best.) |
156 |
> |
* though it is still reachable. |
157 |
|
* |
158 |
|
* Here's the sequence of events for a deletion of node n with |
159 |
|
* predecessor b and successor f, initially: |
163 |
|
* +------+ +------+ +------+ |
164 |
|
* |
165 |
|
* 1. CAS n's value field from non-null to null. |
166 |
< |
* From this point on, no public operations encountering |
167 |
< |
* the node consider this mapping to exist. However, other |
179 |
< |
* ongoing insertions and deletions might still modify |
166 |
> |
* Traversals encountering a node with null value ignore it. |
167 |
> |
* However, ongoing insertions and deletions might still modify |
168 |
|
* n's next pointer. |
169 |
|
* |
170 |
|
* 2. CAS n's next pointer to point to a new marker node. |
187 |
|
* thread noticed during a traversal a node with null value and |
188 |
|
* helped out by marking and/or unlinking. This helping-out |
189 |
|
* ensures that no thread can become stuck waiting for progress of |
190 |
< |
* the deleting thread. The use of marker nodes slightly |
203 |
< |
* complicates helping-out code because traversals must track |
204 |
< |
* consistent reads of up to four nodes (b, n, marker, f), not |
205 |
< |
* just (b, n, f), although the next field of a marker is |
206 |
< |
* immutable, and once a next field is CAS'ed to point to a |
207 |
< |
* marker, it never again changes, so this requires less care. |
190 |
> |
* the deleting thread. |
191 |
|
* |
192 |
|
* Skip lists add indexing to this scheme, so that the base-level |
193 |
|
* traversals start close to the locations being found, inserted |
197 |
|
* b) that are not (structurally) deleted, otherwise retrying |
198 |
|
* after processing the deletion. |
199 |
|
* |
200 |
< |
* Index levels are maintained as lists with volatile next fields, |
201 |
< |
* using CAS to link and unlink. Races are allowed in index-list |
202 |
< |
* operations that can (rarely) fail to link in a new index node |
203 |
< |
* or delete one. (We can't do this of course for data nodes.) |
204 |
< |
* However, even when this happens, the index lists remain sorted, |
205 |
< |
* so correctly serve as indices. This can impact performance, |
206 |
< |
* but since skip lists are probabilistic anyway, the net result |
207 |
< |
* is that under contention, the effective "p" value may be lower |
208 |
< |
* than its nominal value. And race windows are kept small enough |
209 |
< |
* that in practice these failures are rare, even under a lot of |
210 |
< |
* contention. |
211 |
< |
* |
212 |
< |
* The fact that retries (for both base and index lists) are |
213 |
< |
* relatively cheap due to indexing allows some minor |
214 |
< |
* simplifications of retry logic. Traversal restarts are |
215 |
< |
* performed after most "helping-out" CASes. This isn't always |
233 |
< |
* strictly necessary, but the implicit backoffs tend to help |
234 |
< |
* reduce other downstream failed CAS's enough to outweigh restart |
235 |
< |
* cost. This worsens the worst case, but seems to improve even |
236 |
< |
* highly contended cases. |
237 |
< |
* |
238 |
< |
* Unlike most skip-list implementations, index insertion and |
239 |
< |
* deletion here require a separate traversal pass occurring after |
240 |
< |
* the base-level action, to add or remove index nodes. This adds |
241 |
< |
* to single-threaded overhead, but improves contended |
242 |
< |
* multithreaded performance by narrowing interference windows, |
243 |
< |
* and allows deletion to ensure that all index nodes will be made |
244 |
< |
* unreachable upon return from a public remove operation, thus |
245 |
< |
* avoiding unwanted garbage retention. This is more important |
246 |
< |
* here than in some other data structures because we cannot null |
247 |
< |
* out node fields referencing user keys since they might still be |
248 |
< |
* read by other ongoing traversals. |
200 |
> |
* Index levels are maintained using CAS to link and unlink |
201 |
> |
* successors ("right" fields). Races are allowed in index-list |
202 |
> |
* operations that can (rarely) fail to link in a new index node. |
203 |
> |
* (We can't do this of course for data nodes.) However, even |
204 |
> |
* when this happens, the index lists correctly guide search. |
205 |
> |
* This can impact performance, but since skip lists are |
206 |
> |
* probabilistic anyway, the net result is that under contention, |
207 |
> |
* the effective "p" value may be lower than its nominal value. |
208 |
> |
* |
209 |
> |
* Index insertion and deletion sometimes require a separate |
210 |
> |
* traversal pass occurring after the base-level action, to add or |
211 |
> |
* remove index nodes. This adds to single-threaded overhead, but |
212 |
> |
* improves contended multithreaded performance by narrowing |
213 |
> |
* interference windows, and allows deletion to ensure that all |
214 |
> |
* index nodes will be made unreachable upon return from a public |
215 |
> |
* remove operation, thus avoiding unwanted garbage retention. |
216 |
|
* |
217 |
|
* Indexing uses skip list parameters that maintain good search |
218 |
|
* performance while using sparser-than-usual indices: The |
219 |
< |
* hardwired parameters k=1, p=0.5 (see method doPut) mean |
220 |
< |
* that about one-quarter of the nodes have indices. Of those that |
221 |
< |
* do, half have one level, a quarter have two, and so on (see |
222 |
< |
* Pugh's Skip List Cookbook, sec 3.4). The expected total space |
223 |
< |
* requirement for a map is slightly less than for the current |
224 |
< |
* implementation of java.util.TreeMap. |
219 |
> |
* hardwired parameters k=1, p=0.5 (see method doPut) mean that |
220 |
> |
* about one-quarter of the nodes have indices. Of those that do, |
221 |
> |
* half have one level, a quarter have two, and so on (see Pugh's |
222 |
> |
* Skip List Cookbook, sec 3.4), up to a maximum of 62 levels |
223 |
> |
* (appropriate for up to 2^63 elements). The expected total |
224 |
> |
* space requirement for a map is slightly less than for the |
225 |
> |
* current implementation of java.util.TreeMap. |
226 |
|
* |
227 |
|
* Changing the level of the index (i.e, the height of the |
228 |
< |
* tree-like structure) also uses CAS. The head index has initial |
229 |
< |
* level/height of one. Creation of an index with height greater |
230 |
< |
* than the current level adds a level to the head index by |
231 |
< |
* CAS'ing on a new top-most head. To maintain good performance |
232 |
< |
* after a lot of removals, deletion methods heuristically try to |
233 |
< |
* reduce the height if the topmost levels appear to be empty. |
234 |
< |
* This may encounter races in which it is possible (but rare) to |
235 |
< |
* reduce and "lose" a level just as it is about to contain an |
236 |
< |
* index (that will then never be encountered). This does no |
237 |
< |
* structural harm, and in practice appears to be a better option |
238 |
< |
* than allowing unrestrained growth of levels. |
239 |
< |
* |
240 |
< |
* The code for all this is more verbose than you'd like. Most |
241 |
< |
* operations entail locating an element (or position to insert an |
242 |
< |
* element). The code to do this can't be nicely factored out |
243 |
< |
* because subsequent uses require a snapshot of predecessor |
244 |
< |
* and/or successor and/or value fields which can't be returned |
245 |
< |
* all at once, at least not without creating yet another object |
246 |
< |
* to hold them -- creating such little objects is an especially |
247 |
< |
* bad idea for basic internal search operations because it adds |
248 |
< |
* to GC overhead. (This is one of the few times I've wished Java |
249 |
< |
* had macros.) Instead, some traversal code is interleaved within |
250 |
< |
* insertion and removal operations. The control logic to handle |
251 |
< |
* all the retry conditions is sometimes twisty. Most search is |
252 |
< |
* broken into 2 parts. findPredecessor() searches index nodes |
253 |
< |
* only, returning a base-level predecessor of the key. findNode() |
254 |
< |
* finishes out the base-level search. Even with this factoring, |
255 |
< |
* there is a fair amount of near-duplication of code to handle |
256 |
< |
* variants. |
228 |
> |
* tree-like structure) also uses CAS. Creation of an index with |
229 |
> |
* height greater than the current level adds a level to the head |
230 |
> |
* index by CAS'ing on a new top-most head. To maintain good |
231 |
> |
* performance after a lot of removals, deletion methods |
232 |
> |
* heuristically try to reduce the height if the topmost levels |
233 |
> |
* appear to be empty. This may encounter races in which it is |
234 |
> |
* possible (but rare) to reduce and "lose" a level just as it is |
235 |
> |
* about to contain an index (that will then never be |
236 |
> |
* encountered). This does no structural harm, and in practice |
237 |
> |
* appears to be a better option than allowing unrestrained growth |
238 |
> |
* of levels. |
239 |
> |
* |
240 |
> |
* This class provides concurrent-reader-style memory consistency, |
241 |
> |
* ensuring that read-only methods report status and/or values no |
242 |
> |
* staler than those holding at method entry. This is done by |
243 |
> |
* performing all publication and structural updates using |
244 |
> |
* (volatile) CAS, placing an acquireFence in a few access |
245 |
> |
* methods, and ensuring that linked objects are transitively |
246 |
> |
* acquired via dependent reads (normally once) unless performing |
247 |
> |
* a volatile-mode CAS operation (that also acts as an acquire and |
248 |
> |
* release). This is a form of fence-hoisting similar to RCU |
249 |
> |
* and related techniques (see McKenney's online book |
250 |
> |
* https://www.kernel.org/pub/linux/kernel/people/paulmck/perfbook/perfbook.html) |
251 |
> |
* It minimizes overhead that may otherwise occur when using so |
252 |
> |
* many volatile-mode reads. Using explicit acquireFences is |
253 |
> |
* logistically easier than targeting particular fields to be read |
254 |
> |
* in acquire mode: fences are just hoisted up as far as possible, |
255 |
> |
* to the entry points or loop headers of a few methods. A |
256 |
> |
* potential disadvantage is that these few remaining fences are |
257 |
> |
* not easily optimized away by compilers under exclusively |
258 |
> |
* single-thread use. It requires some care to avoid volatile mode |
259 |
> |
* reads of other fields. (Note that the memory semantics of a |
260 |
> |
* reference dependently read in plain mode exactly once are |
261 |
> |
* equivalent to those for atomic opaque mode.) Iterators and |
262 |
> |
* other traversals encounter each node and value exactly once. |
263 |
> |
* Other operations locate an element (or position to insert an |
264 |
> |
* element) via a sequence of dereferences. This search is broken |
265 |
> |
* into two parts. Method findPredecessor (and its specialized |
266 |
> |
* embeddings) searches index nodes only, returning a base-level |
267 |
> |
* predecessor of the key. Callers carry out the base-level |
268 |
> |
* search, restarting if encountering a marker preventing link |
269 |
> |
* modification. In some cases, it is possible to encounter a |
270 |
> |
* node multiple times while descending levels. For mutative |
271 |
> |
* operations, the reported value is validated using CAS (else |
272 |
> |
* retrying), preserving linearizability with respect to each |
273 |
> |
* other. Others may return any (non-null) value held during the |
274 |
> |
* course of the method call. (Search-based methods also include |
275 |
> |
* some useless-looking explicit null checks designed to allow |
276 |
> |
* more fields to be nulled out upon removal, to reduce floating |
277 |
> |
* garbage, but which is not currently done, pending discovery of |
278 |
> |
* a way to do this with less impact on other operations.) |
279 |
|
* |
280 |
|
* To produce random values without interference across threads, |
281 |
|
* we use within-JDK thread local random support (via the |
282 |
|
* "secondary seed", to avoid interference with user-level |
283 |
|
* ThreadLocalRandom.) |
284 |
|
* |
295 |
– |
* A previous version of this class wrapped non-comparable keys |
296 |
– |
* with their comparators to emulate Comparables when using |
297 |
– |
* comparators vs Comparables. However, JVMs now appear to better |
298 |
– |
* handle infusing comparator-vs-comparable choice into search |
299 |
– |
* loops. Static method cpr(comparator, x, y) is used for all |
300 |
– |
* comparisons, which works well as long as the comparator |
301 |
– |
* argument is set up outside of loops (thus sometimes passed as |
302 |
– |
* an argument to internal methods) to avoid field re-reads. |
303 |
– |
* |
285 |
|
* For explanation of algorithms sharing at least a couple of |
286 |
|
* features with this one, see Mikhail Fomitchev's thesis |
287 |
|
* (http://www.cs.yorku.ca/~mikhail/), Keir Fraser's thesis |
288 |
|
* (http://www.cl.cam.ac.uk/users/kaf24/), and Hakan Sundell's |
289 |
|
* thesis (http://www.cs.chalmers.se/~phs/). |
290 |
|
* |
310 |
– |
* Given the use of tree-like index nodes, you might wonder why |
311 |
– |
* this doesn't use some kind of search tree instead, which would |
312 |
– |
* support somewhat faster search operations. The reason is that |
313 |
– |
* there are no known efficient lock-free insertion and deletion |
314 |
– |
* algorithms for search trees. The immutability of the "down" |
315 |
– |
* links of index nodes (as opposed to mutable "left" fields in |
316 |
– |
* true trees) makes this tractable using only CAS operations. |
317 |
– |
* |
291 |
|
* Notation guide for local variables |
292 |
< |
* Node: b, n, f for predecessor, node, successor |
292 |
> |
* Node: b, n, f, p for predecessor, node, successor, aux |
293 |
|
* Index: q, r, d for index node, right, down. |
321 |
– |
* t for another index node |
294 |
|
* Head: h |
323 |
– |
* Levels: j |
295 |
|
* Keys: k, key |
296 |
|
* Values: v, value |
297 |
|
* Comparisons: c |
298 |
+ |
* |
299 |
+ |
* Note that, with VarHandles, a boolean result of |
300 |
+ |
* compareAndSet must be declared even if not used. |
301 |
|
*/ |
302 |
|
|
303 |
|
private static final long serialVersionUID = -8627078645895051609L; |
304 |
|
|
305 |
|
/** |
332 |
– |
* Special value used to identify base-level header. |
333 |
– |
*/ |
334 |
– |
static final Object BASE_HEADER = new Object(); |
335 |
– |
|
336 |
– |
/** |
337 |
– |
* The topmost head index of the skiplist. |
338 |
– |
*/ |
339 |
– |
private transient volatile HeadIndex<K,V> head; |
340 |
– |
|
341 |
– |
/** |
306 |
|
* The comparator used to maintain order in this map, or null if |
307 |
|
* using natural ordering. (Non-private to simplify access in |
308 |
|
* nested classes.) |
310 |
|
*/ |
311 |
|
final Comparator<? super K> comparator; |
312 |
|
|
313 |
+ |
/** Lazily initialized topmost index of the skiplist. */ |
314 |
+ |
private transient Index<K,V> head; |
315 |
+ |
/** Lazily initialized element count */ |
316 |
+ |
private transient LongAdder adder; |
317 |
|
/** Lazily initialized key set */ |
318 |
|
private transient KeySet<K,V> keySet; |
319 |
|
/** Lazily initialized values collection */ |
324 |
|
private transient SubMap<K,V> descendingMap; |
325 |
|
|
326 |
|
/** |
359 |
– |
* Initializes or resets state. Needed by constructors, clone, |
360 |
– |
* clear, readObject, and ConcurrentSkipListSet.clone. |
361 |
– |
* (Note that comparator must be separately initialized.) |
362 |
– |
*/ |
363 |
– |
private void initialize() { |
364 |
– |
keySet = null; |
365 |
– |
entrySet = null; |
366 |
– |
values = null; |
367 |
– |
descendingMap = null; |
368 |
– |
head = new HeadIndex<K,V>(new Node<K,V>(null, BASE_HEADER, null), |
369 |
– |
null, null, 1); |
370 |
– |
} |
371 |
– |
|
372 |
– |
/** |
373 |
– |
* compareAndSet head node. |
374 |
– |
*/ |
375 |
– |
private boolean casHead(HeadIndex<K,V> cmp, HeadIndex<K,V> val) { |
376 |
– |
return HEAD.compareAndSet(this, cmp, val); |
377 |
– |
} |
378 |
– |
|
379 |
– |
/* ---------------- Nodes -------------- */ |
380 |
– |
|
381 |
– |
/** |
327 |
|
* Nodes hold keys and values, and are singly linked in sorted |
328 |
|
* order, possibly with some intervening marker nodes. The list is |
329 |
< |
* headed by a dummy node accessible as head.node. The value field |
330 |
< |
* is declared only as Object because it takes special non-V |
331 |
< |
* values for marker and header nodes. |
329 |
> |
* headed by a header node accessible as head.node. Headers and |
330 |
> |
* marker nodes have null keys. The val field (but currently |
331 |
> |
* not the key field) is nulled out upon deletion. |
332 |
|
*/ |
333 |
|
static final class Node<K,V> { |
334 |
< |
final K key; |
335 |
< |
volatile Object value; |
336 |
< |
volatile Node<K,V> next; |
337 |
< |
|
393 |
< |
/** |
394 |
< |
* Creates a new regular node. |
395 |
< |
*/ |
396 |
< |
Node(K key, Object value, Node<K,V> next) { |
334 |
> |
final K key; // currently, never detached |
335 |
> |
V val; |
336 |
> |
Node<K,V> next; |
337 |
> |
Node(K key, V value, Node<K,V> next) { |
338 |
|
this.key = key; |
339 |
< |
this.value = value; |
339 |
> |
this.val = value; |
340 |
|
this.next = next; |
341 |
|
} |
401 |
– |
|
402 |
– |
/** |
403 |
– |
* Creates a new marker node. A marker is distinguished by |
404 |
– |
* having its value field point to itself. Marker nodes also |
405 |
– |
* have null keys, a fact that is exploited in a few places, |
406 |
– |
* but this doesn't distinguish markers from the base-level |
407 |
– |
* header node (head.node), which also has a null key. |
408 |
– |
*/ |
409 |
– |
Node(Node<K,V> next) { |
410 |
– |
this.key = null; |
411 |
– |
this.value = this; |
412 |
– |
this.next = next; |
413 |
– |
} |
414 |
– |
|
415 |
– |
/** |
416 |
– |
* compareAndSet value field. |
417 |
– |
*/ |
418 |
– |
boolean casValue(Object cmp, Object val) { |
419 |
– |
return VALUE.compareAndSet(this, cmp, val); |
420 |
– |
} |
421 |
– |
|
422 |
– |
/** |
423 |
– |
* compareAndSet next field. |
424 |
– |
*/ |
425 |
– |
boolean casNext(Node<K,V> cmp, Node<K,V> val) { |
426 |
– |
return NEXT.compareAndSet(this, cmp, val); |
427 |
– |
} |
428 |
– |
|
429 |
– |
/** |
430 |
– |
* Returns true if this node is a marker. This method isn't |
431 |
– |
* actually called in any current code checking for markers |
432 |
– |
* because callers will have already read value field and need |
433 |
– |
* to use that read (not another done here) and so directly |
434 |
– |
* test if value points to node. |
435 |
– |
* |
436 |
– |
* @return true if this node is a marker node |
437 |
– |
*/ |
438 |
– |
boolean isMarker() { |
439 |
– |
return value == this; |
440 |
– |
} |
441 |
– |
|
442 |
– |
/** |
443 |
– |
* Returns true if this node is the header of base-level list. |
444 |
– |
* @return true if this node is header node |
445 |
– |
*/ |
446 |
– |
boolean isBaseHeader() { |
447 |
– |
return value == BASE_HEADER; |
448 |
– |
} |
449 |
– |
|
450 |
– |
/** |
451 |
– |
* Tries to append a deletion marker to this node. |
452 |
– |
* @param f the assumed current successor of this node |
453 |
– |
* @return true if successful |
454 |
– |
*/ |
455 |
– |
boolean appendMarker(Node<K,V> f) { |
456 |
– |
return casNext(f, new Node<K,V>(f)); |
457 |
– |
} |
458 |
– |
|
459 |
– |
/** |
460 |
– |
* Helps out a deletion by appending marker or unlinking from |
461 |
– |
* predecessor. This is called during traversals when value |
462 |
– |
* field seen to be null. |
463 |
– |
* @param b predecessor |
464 |
– |
* @param f successor |
465 |
– |
*/ |
466 |
– |
void helpDelete(Node<K,V> b, Node<K,V> f) { |
467 |
– |
/* |
468 |
– |
* Rechecking links and then doing only one of the |
469 |
– |
* help-out stages per call tends to minimize CAS |
470 |
– |
* interference among helping threads. |
471 |
– |
*/ |
472 |
– |
if (f == next && this == b.next) { |
473 |
– |
if (f == null || f.value != f) // not already marked |
474 |
– |
casNext(f, new Node<K,V>(f)); |
475 |
– |
else |
476 |
– |
b.casNext(this, f.next); |
477 |
– |
} |
478 |
– |
} |
479 |
– |
|
480 |
– |
/** |
481 |
– |
* Returns value if this node contains a valid key-value pair, |
482 |
– |
* else null. |
483 |
– |
* @return this node's value if it isn't a marker or header or |
484 |
– |
* is deleted, else null |
485 |
– |
*/ |
486 |
– |
V getValidValue() { |
487 |
– |
Object v = value; |
488 |
– |
if (v == this || v == BASE_HEADER) |
489 |
– |
return null; |
490 |
– |
@SuppressWarnings("unchecked") V vv = (V)v; |
491 |
– |
return vv; |
492 |
– |
} |
493 |
– |
|
494 |
– |
/** |
495 |
– |
* Creates and returns a new SimpleImmutableEntry holding current |
496 |
– |
* mapping if this node holds a valid value, else null. |
497 |
– |
* @return new entry or null |
498 |
– |
*/ |
499 |
– |
AbstractMap.SimpleImmutableEntry<K,V> createSnapshot() { |
500 |
– |
Object v = value; |
501 |
– |
if (v == null || v == this || v == BASE_HEADER) |
502 |
– |
return null; |
503 |
– |
@SuppressWarnings("unchecked") V vv = (V)v; |
504 |
– |
return new AbstractMap.SimpleImmutableEntry<K,V>(key, vv); |
505 |
– |
} |
506 |
– |
|
507 |
– |
// VarHandle mechanics |
508 |
– |
private static final VarHandle VALUE; |
509 |
– |
private static final VarHandle NEXT; |
510 |
– |
static { |
511 |
– |
try { |
512 |
– |
MethodHandles.Lookup l = MethodHandles.lookup(); |
513 |
– |
VALUE = l.findVarHandle(Node.class, "value", Object.class); |
514 |
– |
NEXT = l.findVarHandle(Node.class, "next", Node.class); |
515 |
– |
} catch (ReflectiveOperationException e) { |
516 |
– |
throw new Error(e); |
517 |
– |
} |
518 |
– |
} |
342 |
|
} |
343 |
|
|
521 |
– |
/* ---------------- Indexing -------------- */ |
522 |
– |
|
344 |
|
/** |
345 |
< |
* Index nodes represent the levels of the skip list. Note that |
525 |
< |
* even though both Nodes and Indexes have forward-pointing |
526 |
< |
* fields, they have different types and are handled in different |
527 |
< |
* ways, that can't nicely be captured by placing field in a |
528 |
< |
* shared abstract class. |
345 |
> |
* Index nodes represent the levels of the skip list. |
346 |
|
*/ |
347 |
< |
static class Index<K,V> { |
348 |
< |
final Node<K,V> node; |
347 |
> |
static final class Index<K,V> { |
348 |
> |
final Node<K,V> node; // currently, never detached |
349 |
|
final Index<K,V> down; |
350 |
< |
volatile Index<K,V> right; |
534 |
< |
|
535 |
< |
/** |
536 |
< |
* Creates index node with given values. |
537 |
< |
*/ |
350 |
> |
Index<K,V> right; |
351 |
|
Index(Node<K,V> node, Index<K,V> down, Index<K,V> right) { |
352 |
|
this.node = node; |
353 |
|
this.down = down; |
354 |
|
this.right = right; |
355 |
|
} |
356 |
+ |
} |
357 |
|
|
358 |
< |
/** |
545 |
< |
* compareAndSet right field. |
546 |
< |
*/ |
547 |
< |
final boolean casRight(Index<K,V> cmp, Index<K,V> val) { |
548 |
< |
return RIGHT.compareAndSet(this, cmp, val); |
549 |
< |
} |
550 |
< |
|
551 |
< |
/** |
552 |
< |
* Returns true if the node this indexes has been deleted. |
553 |
< |
* @return true if indexed node is known to be deleted |
554 |
< |
*/ |
555 |
< |
final boolean indexesDeletedNode() { |
556 |
< |
return node.value == null; |
557 |
< |
} |
358 |
> |
/* ---------------- Utilities -------------- */ |
359 |
|
|
360 |
< |
/** |
361 |
< |
* Tries to CAS newSucc as successor. To minimize races with |
362 |
< |
* unlink that may lose this index node, if the node being |
363 |
< |
* indexed is known to be deleted, it doesn't try to link in. |
364 |
< |
* @param succ the expected current successor |
365 |
< |
* @param newSucc the new successor |
366 |
< |
* @return true if successful |
367 |
< |
*/ |
567 |
< |
final boolean link(Index<K,V> succ, Index<K,V> newSucc) { |
568 |
< |
Node<K,V> n = node; |
569 |
< |
newSucc.right = succ; |
570 |
< |
return n.value != null && casRight(succ, newSucc); |
571 |
< |
} |
360 |
> |
/** |
361 |
> |
* Compares using comparator or natural ordering if null. |
362 |
> |
* Called only by methods that have performed required type checks. |
363 |
> |
*/ |
364 |
> |
@SuppressWarnings({"unchecked", "rawtypes"}) |
365 |
> |
static int cpr(Comparator c, Object x, Object y) { |
366 |
> |
return (c != null) ? c.compare(x, y) : ((Comparable)x).compareTo(y); |
367 |
> |
} |
368 |
|
|
369 |
< |
/** |
370 |
< |
* Tries to CAS right field to skip over apparent successor |
371 |
< |
* succ. Fails (forcing a retraversal by caller) if this node |
372 |
< |
* is known to be deleted. |
373 |
< |
* @param succ the expected current successor |
374 |
< |
* @return true if successful |
375 |
< |
*/ |
376 |
< |
final boolean unlink(Index<K,V> succ) { |
581 |
< |
return node.value != null && casRight(succ, succ.right); |
582 |
< |
} |
369 |
> |
/** |
370 |
> |
* Returns the header for base node list, or null if uninitialized. |
371 |
> |
*/ |
372 |
> |
final Node<K,V> baseHead() { |
373 |
> |
Index<K,V> h; |
374 |
> |
VarHandle.acquireFence(); |
375 |
> |
return ((h = head) == null) ? null : h.node; |
376 |
> |
} |
377 |
|
|
378 |
< |
// VarHandle mechanics |
379 |
< |
private static final VarHandle RIGHT; |
380 |
< |
static { |
381 |
< |
try { |
382 |
< |
MethodHandles.Lookup l = MethodHandles.lookup(); |
383 |
< |
RIGHT = l.findVarHandle(Index.class, "right", Index.class); |
384 |
< |
} catch (ReflectiveOperationException e) { |
385 |
< |
throw new Error(e); |
378 |
> |
/** |
379 |
> |
* Tries to unlink deleted node n from predecessor b (if both |
380 |
> |
* exist), by first splicing in a marker if not already present. |
381 |
> |
* Upon return, node n is sure to be unlinked from b, possibly |
382 |
> |
* via the actions of some other thread. |
383 |
> |
* |
384 |
> |
* @param b if nonnull, predecessor |
385 |
> |
* @param n if nonnull, node known to be deleted |
386 |
> |
*/ |
387 |
> |
static <K,V> void unlinkNode(Node<K,V> b, Node<K,V> n) { |
388 |
> |
if (b != null && n != null) { |
389 |
> |
Node<K,V> f, p; |
390 |
> |
for (;;) { |
391 |
> |
if ((f = n.next) != null && f.key == null) { |
392 |
> |
p = f.next; // already marked |
393 |
> |
break; |
394 |
> |
} |
395 |
> |
else if (NEXT.compareAndSet(n, f, |
396 |
> |
new Node<K,V>(null, null, f))) { |
397 |
> |
p = f; // add marker |
398 |
> |
break; |
399 |
> |
} |
400 |
|
} |
401 |
+ |
boolean cas = NEXT.compareAndSet(b, n, p); |
402 |
|
} |
403 |
|
} |
404 |
|
|
596 |
– |
/* ---------------- Head nodes -------------- */ |
597 |
– |
|
405 |
|
/** |
406 |
< |
* Nodes heading each level keep track of their level. |
406 |
> |
* Adds to element count, initializing adder if necessary |
407 |
> |
* |
408 |
> |
* @param c count to add |
409 |
|
*/ |
410 |
< |
static final class HeadIndex<K,V> extends Index<K,V> { |
411 |
< |
final int level; |
412 |
< |
HeadIndex(Node<K,V> node, Index<K,V> down, Index<K,V> right, int level) { |
413 |
< |
super(node, down, right); |
414 |
< |
this.level = level; |
606 |
< |
} |
410 |
> |
private void addCount(long c) { |
411 |
> |
LongAdder a; |
412 |
> |
do {} while ((a = adder) == null && |
413 |
> |
!ADDER.compareAndSet(this, null, a = new LongAdder())); |
414 |
> |
a.add(c); |
415 |
|
} |
416 |
|
|
609 |
– |
/* ---------------- Comparison utilities -------------- */ |
610 |
– |
|
417 |
|
/** |
418 |
< |
* Compares using comparator or natural ordering if null. |
613 |
< |
* Called only by methods that have performed required type checks. |
418 |
> |
* Returns element count, initializing adder if necessary. |
419 |
|
*/ |
420 |
< |
@SuppressWarnings({"unchecked", "rawtypes"}) |
421 |
< |
static final int cpr(Comparator c, Object x, Object y) { |
422 |
< |
return (c != null) ? c.compare(x, y) : ((Comparable)x).compareTo(y); |
420 |
> |
final long getAdderCount() { |
421 |
> |
LongAdder a; long c; |
422 |
> |
do {} while ((a = adder) == null && |
423 |
> |
!ADDER.compareAndSet(this, null, a = new LongAdder())); |
424 |
> |
return ((c = a.sum()) <= 0L) ? 0L : c; // ignore transient negatives |
425 |
|
} |
426 |
|
|
427 |
|
/* ---------------- Traversal -------------- */ |
428 |
|
|
429 |
|
/** |
430 |
< |
* Returns a base-level node with key strictly less than given key, |
431 |
< |
* or the base-level header if there is no such node. Also |
432 |
< |
* unlinks indexes to deleted nodes found along the way. Callers |
433 |
< |
* rely on this side-effect of clearing indices to deleted nodes. |
434 |
< |
* @param key the key |
435 |
< |
* @return a predecessor of key |
430 |
> |
* Returns an index node with key strictly less than given key. |
431 |
> |
* Also unlinks indexes to deleted nodes found along the way. |
432 |
> |
* Callers rely on this side-effect of clearing indices to deleted |
433 |
> |
* nodes. |
434 |
> |
* |
435 |
> |
* @param key if nonnull the key |
436 |
> |
* @return a predecessor node of key, or null if uninitialized or null key |
437 |
|
*/ |
438 |
|
private Node<K,V> findPredecessor(Object key, Comparator<? super K> cmp) { |
439 |
< |
if (key == null) |
440 |
< |
throw new NullPointerException(); // don't postpone errors |
441 |
< |
for (;;) { |
442 |
< |
for (Index<K,V> q = head, r = q.right, d;;) { |
443 |
< |
if (r != null) { |
444 |
< |
Node<K,V> n = r.node; |
445 |
< |
K k = n.key; |
446 |
< |
if (n.value == null) { |
447 |
< |
if (!q.unlink(r)) |
448 |
< |
break; // restart |
449 |
< |
r = q.right; // reread r |
642 |
< |
continue; |
439 |
> |
Index<K,V> q; |
440 |
> |
VarHandle.acquireFence(); |
441 |
> |
if ((q = head) == null || key == null) |
442 |
> |
return null; |
443 |
> |
else { |
444 |
> |
for (Index<K,V> r, d;;) { |
445 |
> |
while ((r = q.right) != null) { |
446 |
> |
Node<K,V> p; K k; |
447 |
> |
if ((p = r.node) == null || (k = p.key) == null || |
448 |
> |
p.val == null) { // unlink index to deleted node |
449 |
> |
boolean cas = RIGHT.compareAndSet(q, r, r.right); |
450 |
|
} |
451 |
< |
if (cpr(cmp, key, k) > 0) { |
451 |
> |
else if (cpr(cmp, key, k) > 0) |
452 |
|
q = r; |
453 |
< |
r = r.right; |
454 |
< |
continue; |
648 |
< |
} |
453 |
> |
else |
454 |
> |
break; |
455 |
|
} |
456 |
< |
if ((d = q.down) == null) |
456 |
> |
if ((d = q.down) != null) |
457 |
> |
q = d; |
458 |
> |
else |
459 |
|
return q.node; |
652 |
– |
q = d; |
653 |
– |
r = d.right; |
460 |
|
} |
461 |
|
} |
462 |
|
} |
466 |
|
* deleted nodes seen along the way. Repeatedly traverses at |
467 |
|
* base-level looking for key starting at predecessor returned |
468 |
|
* from findPredecessor, processing base-level deletions as |
469 |
< |
* encountered. Some callers rely on this side-effect of clearing |
470 |
< |
* deleted nodes. |
471 |
< |
* |
472 |
< |
* Restarts occur, at traversal step centered on node n, if: |
473 |
< |
* |
668 |
< |
* (1) After reading n's next field, n is no longer assumed |
669 |
< |
* predecessor b's current successor, which means that |
670 |
< |
* we don't have a consistent 3-node snapshot and so cannot |
671 |
< |
* unlink any subsequent deleted nodes encountered. |
672 |
< |
* |
673 |
< |
* (2) n's value field is null, indicating n is deleted, in |
674 |
< |
* which case we help out an ongoing structural deletion |
675 |
< |
* before retrying. Even though there are cases where such |
676 |
< |
* unlinking doesn't require restart, they aren't sorted out |
677 |
< |
* here because doing so would not usually outweigh cost of |
678 |
< |
* restarting. |
679 |
< |
* |
680 |
< |
* (3) n is a marker or n's predecessor's value field is null, |
681 |
< |
* indicating (among other possibilities) that |
682 |
< |
* findPredecessor returned a deleted node. We can't unlink |
683 |
< |
* the node because we don't know its predecessor, so rely |
684 |
< |
* on another call to findPredecessor to notice and return |
685 |
< |
* some earlier predecessor, which it will do. This check is |
686 |
< |
* only strictly needed at beginning of loop, (and the |
687 |
< |
* b.value check isn't strictly needed at all) but is done |
688 |
< |
* each iteration to help avoid contention with other |
689 |
< |
* threads by callers that will fail to be able to change |
690 |
< |
* links, and so will retry anyway. |
691 |
< |
* |
692 |
< |
* The traversal loops in doPut, doRemove, and findNear all |
693 |
< |
* include the same three kinds of checks. And specialized |
694 |
< |
* versions appear in findFirst, and findLast and their variants. |
695 |
< |
* They can't easily share code because each uses the reads of |
696 |
< |
* fields held in locals occurring in the orders they were |
697 |
< |
* performed. |
469 |
> |
* encountered. Restarts occur, at traversal step encountering |
470 |
> |
* node n, if n's key field is null, indicating it is a marker, so |
471 |
> |
* its predecessor is deleted before continuing, which we help do |
472 |
> |
* by re-finding a valid predecessor. The traversal loops in |
473 |
> |
* doPut, doRemove, and findNear all include the same checks. |
474 |
|
* |
475 |
|
* @param key the key |
476 |
|
* @return node holding key, or null if no such |
479 |
|
if (key == null) |
480 |
|
throw new NullPointerException(); // don't postpone errors |
481 |
|
Comparator<? super K> cmp = comparator; |
482 |
< |
outer: for (;;) { |
483 |
< |
for (Node<K,V> b = findPredecessor(key, cmp), n = b.next;;) { |
484 |
< |
Object v; int c; |
485 |
< |
if (n == null) |
486 |
< |
break outer; |
487 |
< |
Node<K,V> f = n.next; |
488 |
< |
if (n != b.next) // inconsistent read |
489 |
< |
break; |
490 |
< |
if ((v = n.value) == null) { // n is deleted |
491 |
< |
n.helpDelete(b, f); |
492 |
< |
break; |
493 |
< |
} |
494 |
< |
if (b.value == null || v == n) // b is deleted |
719 |
< |
break; |
720 |
< |
if ((c = cpr(cmp, key, n.key)) == 0) |
482 |
> |
Node<K,V> b; |
483 |
> |
outer: while ((b = findPredecessor(key, cmp)) != null) { |
484 |
> |
for (;;) { |
485 |
> |
Node<K,V> n; K k; V v; int c; |
486 |
> |
if ((n = b.next) == null) |
487 |
> |
break outer; // empty |
488 |
> |
else if ((k = n.key) == null) |
489 |
> |
break; // b is deleted |
490 |
> |
else if ((v = n.val) == null) |
491 |
> |
unlinkNode(b, n); // n is deleted |
492 |
> |
else if ((c = cpr(cmp, key, k)) > 0) |
493 |
> |
b = n; |
494 |
> |
else if (c == 0) |
495 |
|
return n; |
496 |
< |
if (c < 0) |
496 |
> |
else |
497 |
|
break outer; |
724 |
– |
b = n; |
725 |
– |
n = f; |
498 |
|
} |
499 |
|
} |
500 |
|
return null; |
501 |
|
} |
502 |
|
|
503 |
|
/** |
504 |
< |
* Gets value for key. Almost the same as findNode, but returns |
505 |
< |
* the found value (to avoid retries during re-reads) |
504 |
> |
* Gets value for key. Same idea as findNode, except skips over |
505 |
> |
* deletions and markers, and returns first encountered value to |
506 |
> |
* avoid possibly inconsistent rereads. |
507 |
|
* |
508 |
|
* @param key the key |
509 |
|
* @return the value, or null if absent |
510 |
|
*/ |
511 |
|
private V doGet(Object key) { |
512 |
+ |
Index<K,V> q; |
513 |
+ |
VarHandle.acquireFence(); |
514 |
|
if (key == null) |
515 |
|
throw new NullPointerException(); |
516 |
|
Comparator<? super K> cmp = comparator; |
517 |
< |
outer: for (;;) { |
518 |
< |
for (Node<K,V> b = findPredecessor(key, cmp), n = b.next;;) { |
519 |
< |
Object v; int c; |
520 |
< |
if (n == null) |
521 |
< |
break outer; |
522 |
< |
Node<K,V> f = n.next; |
523 |
< |
if (n != b.next) // inconsistent read |
524 |
< |
break; |
525 |
< |
if ((v = n.value) == null) { // n is deleted |
526 |
< |
n.helpDelete(b, f); |
527 |
< |
break; |
517 |
> |
V result = null; |
518 |
> |
if ((q = head) != null) { |
519 |
> |
outer: for (Index<K,V> r, d;;) { |
520 |
> |
while ((r = q.right) != null) { |
521 |
> |
Node<K,V> p; K k; V v; int c; |
522 |
> |
if ((p = r.node) == null || (k = p.key) == null || |
523 |
> |
(v = p.val) == null) { |
524 |
> |
boolean cas = RIGHT.compareAndSet(q, r, r.right); |
525 |
> |
} |
526 |
> |
else if ((c = cpr(cmp, key, k)) > 0) |
527 |
> |
q = r; |
528 |
> |
else if (c == 0) { |
529 |
> |
result = v; |
530 |
> |
break outer; |
531 |
> |
} |
532 |
> |
else |
533 |
> |
break; |
534 |
|
} |
535 |
< |
if (b.value == null || v == n) // b is deleted |
535 |
> |
if ((d = q.down) != null) |
536 |
> |
q = d; |
537 |
> |
else { |
538 |
> |
Node<K,V> b, n; |
539 |
> |
if ((b = q.node) != null) { |
540 |
> |
while ((n = b.next) != null) { |
541 |
> |
V v; int c; |
542 |
> |
K k = n.key; |
543 |
> |
if ((v = n.val) == null || k == null || |
544 |
> |
(c = cpr(cmp, key, k)) > 0) |
545 |
> |
b = n; |
546 |
> |
else { |
547 |
> |
if (c == 0) |
548 |
> |
result = v; |
549 |
> |
break; |
550 |
> |
} |
551 |
> |
} |
552 |
> |
} |
553 |
|
break; |
756 |
– |
if ((c = cpr(cmp, key, n.key)) == 0) { |
757 |
– |
@SuppressWarnings("unchecked") V vv = (V)v; |
758 |
– |
return vv; |
554 |
|
} |
760 |
– |
if (c < 0) |
761 |
– |
break outer; |
762 |
– |
b = n; |
763 |
– |
n = f; |
555 |
|
} |
556 |
|
} |
557 |
< |
return null; |
557 |
> |
return result; |
558 |
|
} |
559 |
|
|
560 |
|
/* ---------------- Insertion -------------- */ |
562 |
|
/** |
563 |
|
* Main insertion method. Adds element if not present, or |
564 |
|
* replaces value if present and onlyIfAbsent is false. |
565 |
+ |
* |
566 |
|
* @param key the key |
567 |
|
* @param value the value that must be associated with key |
568 |
|
* @param onlyIfAbsent if should not insert if already present |
569 |
|
* @return the old value, or null if newly inserted |
570 |
|
*/ |
571 |
|
private V doPut(K key, V value, boolean onlyIfAbsent) { |
780 |
– |
Node<K,V> z; // added node |
572 |
|
if (key == null) |
573 |
|
throw new NullPointerException(); |
574 |
|
Comparator<? super K> cmp = comparator; |
575 |
< |
outer: for (;;) { |
576 |
< |
for (Node<K,V> b = findPredecessor(key, cmp), n = b.next;;) { |
577 |
< |
if (n != null) { |
578 |
< |
Object v; int c; |
579 |
< |
Node<K,V> f = n.next; |
580 |
< |
if (n != b.next) // inconsistent read |
581 |
< |
break; |
582 |
< |
if ((v = n.value) == null) { // n is deleted |
583 |
< |
n.helpDelete(b, f); |
584 |
< |
break; |
575 |
> |
for (;;) { |
576 |
> |
Index<K,V> h; Node<K,V> b; |
577 |
> |
VarHandle.acquireFence(); |
578 |
> |
int levels = 0; // number of levels descended |
579 |
> |
if ((h = head) == null) { // try to initialize |
580 |
> |
Node<K,V> base = new Node<K,V>(null, null, null); |
581 |
> |
h = new Index<K,V>(base, null, null); |
582 |
> |
b = (HEAD.compareAndSet(this, null, h)) ? base : null; |
583 |
> |
} |
584 |
> |
else { |
585 |
> |
for (Index<K,V> q = h, r, d;;) { // count while descending |
586 |
> |
while ((r = q.right) != null) { |
587 |
> |
Node<K,V> p; K k; |
588 |
> |
if ((p = r.node) == null || (k = p.key) == null || |
589 |
> |
p.val == null) { |
590 |
> |
boolean cas = RIGHT.compareAndSet(q, r, r.right); |
591 |
> |
} |
592 |
> |
else if (cpr(cmp, key, k) > 0) |
593 |
> |
q = r; |
594 |
> |
else |
595 |
> |
break; |
596 |
|
} |
597 |
< |
if (b.value == null || v == n) // b is deleted |
598 |
< |
break; |
599 |
< |
if ((c = cpr(cmp, key, n.key)) > 0) { |
798 |
< |
b = n; |
799 |
< |
n = f; |
800 |
< |
continue; |
597 |
> |
if ((d = q.down) != null) { |
598 |
> |
++levels; |
599 |
> |
q = d; |
600 |
|
} |
601 |
< |
if (c == 0) { |
602 |
< |
if (onlyIfAbsent || n.casValue(v, value)) { |
603 |
< |
@SuppressWarnings("unchecked") V vv = (V)v; |
805 |
< |
return vv; |
806 |
< |
} |
807 |
< |
break; // restart if lost race to replace value |
601 |
> |
else { |
602 |
> |
b = q.node; |
603 |
> |
break; |
604 |
|
} |
605 |
< |
// else c < 0; fall through |
810 |
< |
} else if (b == head.node) { |
811 |
< |
// map is empty, so type check key now |
812 |
< |
cpr(cmp, key, key); |
813 |
< |
} |
814 |
< |
|
815 |
< |
z = new Node<K,V>(key, value, n); |
816 |
< |
if (!b.casNext(n, z)) |
817 |
< |
break; // restart if lost race to append to b |
818 |
< |
break outer; |
819 |
< |
} |
820 |
< |
} |
821 |
< |
|
822 |
< |
int rnd = ThreadLocalRandom.nextSecondarySeed(); |
823 |
< |
if ((rnd & 0x80000001) == 0) { // test highest and lowest bits |
824 |
< |
int level = 1, max; |
825 |
< |
while (((rnd >>>= 1) & 1) != 0) |
826 |
< |
++level; |
827 |
< |
Index<K,V> idx = null; |
828 |
< |
HeadIndex<K,V> h = head; |
829 |
< |
if (level <= (max = h.level)) { |
830 |
< |
for (int i = 1; i <= level; ++i) |
831 |
< |
idx = new Index<K,V>(z, idx, null); |
605 |
> |
} |
606 |
|
} |
607 |
< |
else { // try to grow by one level |
608 |
< |
level = max + 1; // hold in array and later pick the one to use |
609 |
< |
@SuppressWarnings("unchecked")Index<K,V>[] idxs = |
610 |
< |
(Index<K,V>[])new Index<?,?>[level+1]; |
611 |
< |
for (int i = 1; i <= level; ++i) |
612 |
< |
idxs[i] = idx = new Index<K,V>(z, idx, null); |
613 |
< |
for (;;) { |
614 |
< |
h = head; |
615 |
< |
int oldLevel = h.level; |
616 |
< |
if (level <= oldLevel) // lost race to add level |
617 |
< |
break; |
618 |
< |
HeadIndex<K,V> newh = h; |
619 |
< |
Node<K,V> oldbase = h.node; |
620 |
< |
for (int j = oldLevel+1; j <= level; ++j) |
621 |
< |
newh = new HeadIndex<K,V>(oldbase, newh, idxs[j], j); |
622 |
< |
if (casHead(h, newh)) { |
623 |
< |
h = newh; |
624 |
< |
idx = idxs[level = oldLevel]; |
607 |
> |
if (b != null) { |
608 |
> |
Node<K,V> z = null; // new node, if inserted |
609 |
> |
for (;;) { // find insertion point |
610 |
> |
Node<K,V> n, p; K k; V v; int c; |
611 |
> |
if ((n = b.next) == null) { |
612 |
> |
if (b.key == null) // if empty, type check key now |
613 |
> |
cpr(cmp, key, key); |
614 |
> |
c = -1; |
615 |
> |
} |
616 |
> |
else if ((k = n.key) == null) |
617 |
> |
break; // can't append; restart |
618 |
> |
else if ((v = n.val) == null) { |
619 |
> |
unlinkNode(b, n); |
620 |
> |
c = 1; |
621 |
> |
} |
622 |
> |
else if ((c = cpr(cmp, key, k)) > 0) |
623 |
> |
b = n; |
624 |
> |
else if (c == 0 && |
625 |
> |
(onlyIfAbsent || VAL.compareAndSet(n, v, value))) |
626 |
> |
return v; |
627 |
> |
|
628 |
> |
if (c < 0 && |
629 |
> |
NEXT.compareAndSet(b, n, |
630 |
> |
p = new Node<K,V>(key, value, n))) { |
631 |
> |
z = p; |
632 |
|
break; |
633 |
|
} |
634 |
|
} |
635 |
< |
} |
636 |
< |
// find insertion points and splice in |
637 |
< |
splice: for (int insertionLevel = level;;) { |
638 |
< |
int j = h.level; |
639 |
< |
for (Index<K,V> q = h, r = q.right, t = idx;;) { |
640 |
< |
if (q == null || t == null) |
641 |
< |
break splice; |
642 |
< |
if (r != null) { |
643 |
< |
Node<K,V> n = r.node; |
644 |
< |
// compare before deletion check avoids needing recheck |
645 |
< |
int c = cpr(cmp, key, n.key); |
865 |
< |
if (n.value == null) { |
866 |
< |
if (!q.unlink(r)) |
635 |
> |
|
636 |
> |
if (z != null) { |
637 |
> |
int lr = ThreadLocalRandom.nextSecondarySeed(); |
638 |
> |
if ((lr & 0x3) == 0) { // add indices with 1/4 prob |
639 |
> |
int hr = ThreadLocalRandom.nextSecondarySeed(); |
640 |
> |
long rnd = ((long)hr << 32) | ((long)lr & 0xffffffffL); |
641 |
> |
int skips = levels; // levels to descend before add |
642 |
> |
Index<K,V> x = null; |
643 |
> |
for (;;) { // create at most 62 indices |
644 |
> |
x = new Index<K,V>(z, x, null); |
645 |
> |
if (rnd >= 0L || --skips < 0) |
646 |
|
break; |
647 |
< |
r = q.right; |
648 |
< |
continue; |
647 |
> |
else |
648 |
> |
rnd <<= 1; |
649 |
|
} |
650 |
< |
if (c > 0) { |
651 |
< |
q = r; |
652 |
< |
r = r.right; |
653 |
< |
continue; |
650 |
> |
if (addIndices(h, skips, x, cmp) && skips < 0 && |
651 |
> |
head == h) { // try to add new level |
652 |
> |
Index<K,V> hx = new Index<K,V>(z, x, null); |
653 |
> |
Index<K,V> nh = new Index<K,V>(h.node, h, hx); |
654 |
> |
boolean cas = HEAD.compareAndSet(this, h, nh); |
655 |
|
} |
656 |
+ |
if (z.val == null) // deleted while adding indices |
657 |
+ |
findPredecessor(key, cmp); // clean |
658 |
|
} |
659 |
+ |
addCount(1L); |
660 |
+ |
return null; |
661 |
+ |
} |
662 |
+ |
} |
663 |
+ |
} |
664 |
+ |
} |
665 |
|
|
666 |
< |
if (j == insertionLevel) { |
667 |
< |
if (!q.link(r, t)) |
668 |
< |
break; // restart |
669 |
< |
if (t.node.value == null) { |
670 |
< |
findNode(key); |
671 |
< |
break splice; |
672 |
< |
} |
673 |
< |
if (--insertionLevel == 0) |
674 |
< |
break splice; |
666 |
> |
/** |
667 |
> |
* Add indices after an insertion. Descends iteratively to the |
668 |
> |
* highest level of insertion, then recursively, to chain index |
669 |
> |
* nodes to lower ones. Returns null on (staleness) failure, |
670 |
> |
* disabling higher-level insertions. Recursion depths are |
671 |
> |
* exponentially less probable. |
672 |
> |
* |
673 |
> |
* @param q starting index for current level |
674 |
> |
* @param skips levels to skip before inserting |
675 |
> |
* @param x index for this insertion |
676 |
> |
* @param cmp comparator |
677 |
> |
*/ |
678 |
> |
static <K,V> boolean addIndices(Index<K,V> q, int skips, Index<K,V> x, |
679 |
> |
Comparator<? super K> cmp) { |
680 |
> |
Node<K,V> z; K key; |
681 |
> |
if (x != null && (z = x.node) != null && (key = z.key) != null && |
682 |
> |
q != null) { // hoist checks |
683 |
> |
boolean retrying = false; |
684 |
> |
for (;;) { // find splice point |
685 |
> |
Index<K,V> r, d; int c; |
686 |
> |
if ((r = q.right) != null) { |
687 |
> |
Node<K,V> p; K k; |
688 |
> |
if ((p = r.node) == null || (k = p.key) == null || |
689 |
> |
p.val == null) { |
690 |
> |
boolean cas = RIGHT.compareAndSet(q, r, r.right); |
691 |
> |
c = 0; |
692 |
|
} |
693 |
+ |
else if ((c = cpr(cmp, key, k)) > 0) |
694 |
+ |
q = r; |
695 |
+ |
else if (c == 0) |
696 |
+ |
break; // stale |
697 |
+ |
} |
698 |
+ |
else |
699 |
+ |
c = -1; |
700 |
|
|
701 |
< |
if (--j >= insertionLevel && j < level) |
702 |
< |
t = t.down; |
703 |
< |
q = q.down; |
704 |
< |
r = q.right; |
701 |
> |
if (c < 0) { |
702 |
> |
if ((d = q.down) != null && skips > 0) { |
703 |
> |
--skips; |
704 |
> |
q = d; |
705 |
> |
} |
706 |
> |
else if (d != null && !retrying && |
707 |
> |
!addIndices(d, 0, x.down, cmp)) |
708 |
> |
break; |
709 |
> |
else { |
710 |
> |
x.right = r; |
711 |
> |
if (RIGHT.compareAndSet(q, r, x)) |
712 |
> |
return true; |
713 |
> |
else |
714 |
> |
retrying = true; // re-find splice point |
715 |
> |
} |
716 |
|
} |
717 |
|
} |
718 |
|
} |
719 |
< |
return null; |
719 |
> |
return false; |
720 |
|
} |
721 |
|
|
722 |
|
/* ---------------- Deletion -------------- */ |
726 |
|
* deletion marker, unlinks predecessor, removes associated index |
727 |
|
* nodes, and possibly reduces head index level. |
728 |
|
* |
906 |
– |
* Index nodes are cleared out simply by calling findPredecessor. |
907 |
– |
* which unlinks indexes to deleted nodes found along path to key, |
908 |
– |
* which will include the indexes to this node. This is done |
909 |
– |
* unconditionally. We can't check beforehand whether there are |
910 |
– |
* index nodes because it might be the case that some or all |
911 |
– |
* indexes hadn't been inserted yet for this node during initial |
912 |
– |
* search for it, and we'd like to ensure lack of garbage |
913 |
– |
* retention, so must call to be sure. |
914 |
– |
* |
729 |
|
* @param key the key |
730 |
|
* @param value if non-null, the value that must be |
731 |
|
* associated with key |
735 |
|
if (key == null) |
736 |
|
throw new NullPointerException(); |
737 |
|
Comparator<? super K> cmp = comparator; |
738 |
< |
outer: for (;;) { |
739 |
< |
for (Node<K,V> b = findPredecessor(key, cmp), n = b.next;;) { |
740 |
< |
Object v; int c; |
741 |
< |
if (n == null) |
738 |
> |
V result = null; |
739 |
> |
Node<K,V> b; |
740 |
> |
outer: while ((b = findPredecessor(key, cmp)) != null && |
741 |
> |
result == null) { |
742 |
> |
for (;;) { |
743 |
> |
Node<K,V> n; K k; V v; int c; |
744 |
> |
if ((n = b.next) == null) |
745 |
|
break outer; |
746 |
< |
Node<K,V> f = n.next; |
930 |
< |
if (n != b.next) // inconsistent read |
931 |
< |
break; |
932 |
< |
if ((v = n.value) == null) { // n is deleted |
933 |
< |
n.helpDelete(b, f); |
934 |
< |
break; |
935 |
< |
} |
936 |
< |
if (b.value == null || v == n) // b is deleted |
746 |
> |
else if ((k = n.key) == null) |
747 |
|
break; |
748 |
< |
if ((c = cpr(cmp, key, n.key)) < 0) |
749 |
< |
break outer; |
750 |
< |
if (c > 0) { |
748 |
> |
else if ((v = n.val) == null) |
749 |
> |
unlinkNode(b, n); |
750 |
> |
else if ((c = cpr(cmp, key, k)) > 0) |
751 |
|
b = n; |
752 |
< |
n = f; |
943 |
< |
continue; |
944 |
< |
} |
945 |
< |
if (value != null && !value.equals(v)) |
752 |
> |
else if (c < 0) |
753 |
|
break outer; |
754 |
< |
if (!n.casValue(v, null)) |
755 |
< |
break; |
756 |
< |
if (!n.appendMarker(f) || !b.casNext(n, f)) |
757 |
< |
findNode(key); // retry via findNode |
758 |
< |
else { |
759 |
< |
findPredecessor(key, cmp); // clean index |
953 |
< |
if (head.right == null) |
954 |
< |
tryReduceLevel(); |
754 |
> |
else if (value != null && !value.equals(v)) |
755 |
> |
break outer; |
756 |
> |
else if (VAL.compareAndSet(n, v, null)) { |
757 |
> |
result = v; |
758 |
> |
unlinkNode(b, n); |
759 |
> |
break; // loop to clean up |
760 |
|
} |
956 |
– |
@SuppressWarnings("unchecked") V vv = (V)v; |
957 |
– |
return vv; |
761 |
|
} |
762 |
|
} |
763 |
< |
return null; |
763 |
> |
if (result != null) { |
764 |
> |
tryReduceLevel(); |
765 |
> |
addCount(-1L); |
766 |
> |
} |
767 |
> |
return result; |
768 |
|
} |
769 |
|
|
770 |
|
/** |
788 |
|
* reduction. |
789 |
|
*/ |
790 |
|
private void tryReduceLevel() { |
791 |
< |
HeadIndex<K,V> h = head; |
792 |
< |
HeadIndex<K,V> d; |
793 |
< |
HeadIndex<K,V> e; |
794 |
< |
if (h.level > 3 && |
795 |
< |
(d = (HeadIndex<K,V>)h.down) != null && |
796 |
< |
(e = (HeadIndex<K,V>)d.down) != null && |
797 |
< |
e.right == null && |
798 |
< |
d.right == null && |
992 |
< |
h.right == null && |
993 |
< |
casHead(h, d) && // try to set |
994 |
< |
h.right != null) // recheck |
995 |
< |
casHead(d, h); // try to backout |
791 |
> |
Index<K,V> h, d, e; |
792 |
> |
if ((h = head) != null && h.right == null && |
793 |
> |
(d = h.down) != null && d.right == null && |
794 |
> |
(e = d.down) != null && e.right == null && |
795 |
> |
HEAD.compareAndSet(this, h, d) && |
796 |
> |
h.right != null) { // recheck |
797 |
> |
boolean cas = HEAD.compareAndSet(this, d, h); // try to backout |
798 |
> |
} |
799 |
|
} |
800 |
|
|
801 |
|
/* ---------------- Finding and removing first element -------------- */ |
802 |
|
|
803 |
|
/** |
804 |
< |
* Specialized variant of findNode to get first valid node. |
804 |
> |
* Gets first valid node, unlinking deleted nodes if encountered. |
805 |
|
* @return first node or null if empty |
806 |
|
*/ |
807 |
|
final Node<K,V> findFirst() { |
808 |
< |
for (Node<K,V> b, n;;) { |
809 |
< |
if ((n = (b = head.node).next) == null) |
810 |
< |
return null; |
811 |
< |
if (n.value != null) |
812 |
< |
return n; |
813 |
< |
n.helpDelete(b, n.next); |
814 |
< |
} |
1012 |
< |
} |
1013 |
< |
|
1014 |
< |
/** |
1015 |
< |
* Removes first entry; returns its snapshot. |
1016 |
< |
* @return null if empty, else snapshot of first entry |
1017 |
< |
*/ |
1018 |
< |
private Map.Entry<K,V> doRemoveFirstEntry() { |
1019 |
< |
for (Node<K,V> b, n;;) { |
1020 |
< |
if ((n = (b = head.node).next) == null) |
1021 |
< |
return null; |
1022 |
< |
Node<K,V> f = n.next; |
1023 |
< |
if (n != b.next) |
1024 |
< |
continue; |
1025 |
< |
Object v = n.value; |
1026 |
< |
if (v == null) { |
1027 |
< |
n.helpDelete(b, f); |
1028 |
< |
continue; |
808 |
> |
Node<K,V> b, n; |
809 |
> |
if ((b = baseHead()) != null) { |
810 |
> |
while ((n = b.next) != null) { |
811 |
> |
if (n.val == null) |
812 |
> |
unlinkNode(b, n); |
813 |
> |
else |
814 |
> |
return n; |
815 |
|
} |
1030 |
– |
if (!n.casValue(v, null)) |
1031 |
– |
continue; |
1032 |
– |
if (!n.appendMarker(f) || !b.casNext(n, f)) |
1033 |
– |
findFirst(); // retry |
1034 |
– |
clearIndexToFirst(); |
1035 |
– |
@SuppressWarnings("unchecked") V vv = (V)v; |
1036 |
– |
return new AbstractMap.SimpleImmutableEntry<K,V>(n.key, vv); |
816 |
|
} |
817 |
+ |
return null; |
818 |
|
} |
819 |
|
|
820 |
|
/** |
821 |
< |
* Clears out index nodes associated with deleted first entry. |
821 |
> |
* Entry snapshot version of findFirst |
822 |
|
*/ |
823 |
< |
private void clearIndexToFirst() { |
824 |
< |
for (;;) { |
825 |
< |
for (Index<K,V> q = head;;) { |
826 |
< |
Index<K,V> r = q.right; |
827 |
< |
if (r != null && r.indexesDeletedNode() && !q.unlink(r)) |
828 |
< |
break; |
829 |
< |
if ((q = q.down) == null) { |
830 |
< |
if (head.right == null) |
1051 |
< |
tryReduceLevel(); |
1052 |
< |
return; |
1053 |
< |
} |
823 |
> |
final AbstractMap.SimpleImmutableEntry<K,V> findFirstEntry() { |
824 |
> |
Node<K,V> b, n; V v; |
825 |
> |
if ((b = baseHead()) != null) { |
826 |
> |
while ((n = b.next) != null) { |
827 |
> |
if ((v = n.val) == null) |
828 |
> |
unlinkNode(b, n); |
829 |
> |
else |
830 |
> |
return new AbstractMap.SimpleImmutableEntry<K,V>(n.key, v); |
831 |
|
} |
832 |
|
} |
833 |
+ |
return null; |
834 |
|
} |
835 |
|
|
836 |
|
/** |
837 |
< |
* Removes last entry; returns its snapshot. |
838 |
< |
* Specialized variant of doRemove. |
1061 |
< |
* @return null if empty, else snapshot of last entry |
837 |
> |
* Removes first entry; returns its snapshot. |
838 |
> |
* @return null if empty, else snapshot of first entry |
839 |
|
*/ |
840 |
< |
private Map.Entry<K,V> doRemoveLastEntry() { |
841 |
< |
for (;;) { |
842 |
< |
Node<K,V> b = findPredecessorOfLast(); |
843 |
< |
Node<K,V> n = b.next; |
844 |
< |
if (n == null) { |
845 |
< |
if (b.isBaseHeader()) // empty |
846 |
< |
return null; |
847 |
< |
else |
1071 |
< |
continue; // all b's successors are deleted; retry |
1072 |
< |
} |
1073 |
< |
for (;;) { |
1074 |
< |
Node<K,V> f = n.next; |
1075 |
< |
if (n != b.next) // inconsistent read |
1076 |
< |
break; |
1077 |
< |
Object v = n.value; |
1078 |
< |
if (v == null) { // n is deleted |
1079 |
< |
n.helpDelete(b, f); |
1080 |
< |
break; |
1081 |
< |
} |
1082 |
< |
if (b.value == null || v == n) // b is deleted |
1083 |
< |
break; |
1084 |
< |
if (f != null) { |
1085 |
< |
b = n; |
1086 |
< |
n = f; |
1087 |
< |
continue; |
1088 |
< |
} |
1089 |
< |
if (!n.casValue(v, null)) |
1090 |
< |
break; |
1091 |
< |
K key = n.key; |
1092 |
< |
if (!n.appendMarker(f) || !b.casNext(n, f)) |
1093 |
< |
findNode(key); // retry via findNode |
1094 |
< |
else { // clean index |
1095 |
< |
findPredecessor(key, comparator); |
1096 |
< |
if (head.right == null) |
840 |
> |
private AbstractMap.SimpleImmutableEntry<K,V> doRemoveFirstEntry() { |
841 |
> |
Node<K,V> b, n; V v; |
842 |
> |
if ((b = baseHead()) != null) { |
843 |
> |
while ((n = b.next) != null) { |
844 |
> |
if ((v = n.val) == null || VAL.compareAndSet(n, v, null)) { |
845 |
> |
K k = n.key; |
846 |
> |
unlinkNode(b, n); |
847 |
> |
if (v != null) { |
848 |
|
tryReduceLevel(); |
849 |
+ |
findPredecessor(k, comparator); // clean index |
850 |
+ |
addCount(-1L); |
851 |
+ |
return new AbstractMap.SimpleImmutableEntry<K,V>(k, v); |
852 |
+ |
} |
853 |
|
} |
1099 |
– |
@SuppressWarnings("unchecked") V vv = (V)v; |
1100 |
– |
return new AbstractMap.SimpleImmutableEntry<K,V>(key, vv); |
854 |
|
} |
855 |
|
} |
856 |
+ |
return null; |
857 |
|
} |
858 |
|
|
859 |
|
/* ---------------- Finding and removing last element -------------- */ |
863 |
|
* @return last node or null if empty |
864 |
|
*/ |
865 |
|
final Node<K,V> findLast() { |
866 |
< |
/* |
867 |
< |
* findPredecessor can't be used to traverse index level |
868 |
< |
* because this doesn't use comparisons. So traversals of |
869 |
< |
* both levels are folded together. |
870 |
< |
*/ |
871 |
< |
Index<K,V> q = head; |
872 |
< |
for (;;) { |
873 |
< |
Index<K,V> d, r; |
874 |
< |
if ((r = q.right) != null) { |
875 |
< |
if (r.indexesDeletedNode()) { |
876 |
< |
q.unlink(r); |
877 |
< |
q = head; // restart |
866 |
> |
outer: for (;;) { |
867 |
> |
Index<K,V> q; Node<K,V> b; |
868 |
> |
VarHandle.acquireFence(); |
869 |
> |
if ((q = head) == null) |
870 |
> |
break; |
871 |
> |
for (Index<K,V> r, d;;) { |
872 |
> |
while ((r = q.right) != null) { |
873 |
> |
Node<K,V> p; |
874 |
> |
if ((p = r.node) == null || p.val == null) { |
875 |
> |
boolean cas = RIGHT.compareAndSet(q, r, r.right); |
876 |
> |
} |
877 |
> |
else |
878 |
> |
q = r; |
879 |
|
} |
880 |
< |
else |
881 |
< |
q = r; |
882 |
< |
} else if ((d = q.down) != null) { |
883 |
< |
q = d; |
884 |
< |
} else { |
885 |
< |
for (Node<K,V> b = q.node, n = b.next;;) { |
886 |
< |
if (n == null) |
887 |
< |
return b.isBaseHeader() ? null : b; |
888 |
< |
Node<K,V> f = n.next; // inconsistent read |
889 |
< |
if (n != b.next) |
890 |
< |
break; |
891 |
< |
Object v = n.value; |
892 |
< |
if (v == null) { // n is deleted |
893 |
< |
n.helpDelete(b, f); |
894 |
< |
break; |
880 |
> |
if ((d = q.down) != null) |
881 |
> |
q = d; |
882 |
> |
else { |
883 |
> |
b = q.node; |
884 |
> |
break; |
885 |
> |
} |
886 |
> |
} |
887 |
> |
if (b != null) { |
888 |
> |
for (;;) { |
889 |
> |
Node<K,V> n; |
890 |
> |
if ((n = b.next) == null) { |
891 |
> |
if (b.key == null) // empty |
892 |
> |
break outer; |
893 |
> |
else |
894 |
> |
return b; |
895 |
|
} |
896 |
< |
if (b.value == null || v == n) // b is deleted |
896 |
> |
else if (n.key == null) |
897 |
|
break; |
898 |
< |
b = n; |
899 |
< |
n = f; |
898 |
> |
else if (n.val == null) |
899 |
> |
unlinkNode(b, n); |
900 |
> |
else |
901 |
> |
b = n; |
902 |
|
} |
1146 |
– |
q = head; // restart |
903 |
|
} |
904 |
|
} |
905 |
+ |
return null; |
906 |
|
} |
907 |
|
|
908 |
|
/** |
909 |
< |
* Specialized variant of findPredecessor to get predecessor of last |
910 |
< |
* valid node. Needed when removing the last entry. It is possible |
1154 |
< |
* that all successors of returned node will have been deleted upon |
1155 |
< |
* return, in which case this method can be retried. |
1156 |
< |
* @return likely predecessor of last node |
909 |
> |
* Entry version of findLast |
910 |
> |
* @return Entry for last node or null if empty |
911 |
|
*/ |
912 |
< |
private Node<K,V> findPredecessorOfLast() { |
912 |
> |
final AbstractMap.SimpleImmutableEntry<K,V> findLastEntry() { |
913 |
|
for (;;) { |
914 |
< |
for (Index<K,V> q = head;;) { |
915 |
< |
Index<K,V> d, r; |
916 |
< |
if ((r = q.right) != null) { |
917 |
< |
if (r.indexesDeletedNode()) { |
918 |
< |
q.unlink(r); |
919 |
< |
break; // must restart |
920 |
< |
} |
921 |
< |
// proceed as far across as possible without overshooting |
922 |
< |
if (r.node.next != null) { |
923 |
< |
q = r; |
924 |
< |
continue; |
914 |
> |
Node<K,V> n; V v; |
915 |
> |
if ((n = findLast()) == null) |
916 |
> |
return null; |
917 |
> |
if ((v = n.val) != null) |
918 |
> |
return new AbstractMap.SimpleImmutableEntry<K,V>(n.key, v); |
919 |
> |
} |
920 |
> |
} |
921 |
> |
|
922 |
> |
/** |
923 |
> |
* Removes last entry; returns its snapshot. |
924 |
> |
* Specialized variant of doRemove. |
925 |
> |
* @return null if empty, else snapshot of last entry |
926 |
> |
*/ |
927 |
> |
private Map.Entry<K,V> doRemoveLastEntry() { |
928 |
> |
outer: for (;;) { |
929 |
> |
Index<K,V> q; Node<K,V> b; |
930 |
> |
VarHandle.acquireFence(); |
931 |
> |
if ((q = head) == null) |
932 |
> |
break; |
933 |
> |
for (;;) { |
934 |
> |
Index<K,V> d, r; Node<K,V> p; |
935 |
> |
while ((r = q.right) != null) { |
936 |
> |
if ((p = r.node) == null || p.val == null) { |
937 |
> |
boolean cas = RIGHT.compareAndSet(q, r, r.right); |
938 |
|
} |
939 |
+ |
else if (p.next != null) |
940 |
+ |
q = r; // continue only if a successor |
941 |
+ |
else |
942 |
+ |
break; |
943 |
|
} |
944 |
|
if ((d = q.down) != null) |
945 |
|
q = d; |
946 |
< |
else |
947 |
< |
return q.node; |
946 |
> |
else { |
947 |
> |
b = q.node; |
948 |
> |
break; |
949 |
> |
} |
950 |
> |
} |
951 |
> |
if (b != null) { |
952 |
> |
for (;;) { |
953 |
> |
Node<K,V> n; K k; V v; |
954 |
> |
if ((n = b.next) == null) { |
955 |
> |
if (b.key == null) // empty |
956 |
> |
break outer; |
957 |
> |
else |
958 |
> |
break; // retry |
959 |
> |
} |
960 |
> |
else if ((k = n.key) == null) |
961 |
> |
break; |
962 |
> |
else if ((v = n.val) == null) |
963 |
> |
unlinkNode(b, n); |
964 |
> |
else if (n.next != null) |
965 |
> |
b = n; |
966 |
> |
else if (VAL.compareAndSet(n, v, null)) { |
967 |
> |
unlinkNode(b, n); |
968 |
> |
tryReduceLevel(); |
969 |
> |
findPredecessor(k, comparator); // clean index |
970 |
> |
addCount(-1L); |
971 |
> |
return new AbstractMap.SimpleImmutableEntry<K,V>(k, v); |
972 |
> |
} |
973 |
> |
} |
974 |
|
} |
975 |
|
} |
976 |
+ |
return null; |
977 |
|
} |
978 |
|
|
979 |
|
/* ---------------- Relational operations -------------- */ |
993 |
|
final Node<K,V> findNear(K key, int rel, Comparator<? super K> cmp) { |
994 |
|
if (key == null) |
995 |
|
throw new NullPointerException(); |
996 |
< |
for (;;) { |
997 |
< |
for (Node<K,V> b = findPredecessor(key, cmp), n = b.next;;) { |
998 |
< |
Object v; |
999 |
< |
if (n == null) |
1000 |
< |
return ((rel & LT) == 0 || b.isBaseHeader()) ? null : b; |
1001 |
< |
Node<K,V> f = n.next; |
1002 |
< |
if (n != b.next) // inconsistent read |
1003 |
< |
break; |
1004 |
< |
if ((v = n.value) == null) { // n is deleted |
1005 |
< |
n.helpDelete(b, f); |
1006 |
< |
break; |
996 |
> |
Node<K,V> result; |
997 |
> |
outer: for (Node<K,V> b;;) { |
998 |
> |
if ((b = findPredecessor(key, cmp)) == null) { |
999 |
> |
result = null; |
1000 |
> |
break; // empty |
1001 |
> |
} |
1002 |
> |
for (;;) { |
1003 |
> |
Node<K,V> n; K k; int c; |
1004 |
> |
if ((n = b.next) == null) { |
1005 |
> |
result = ((rel & LT) != 0 && b.key != null) ? b : null; |
1006 |
> |
break outer; |
1007 |
|
} |
1008 |
< |
if (b.value == null || v == n) // b is deleted |
1008 |
> |
else if ((k = n.key) == null) |
1009 |
|
break; |
1010 |
< |
int c = cpr(cmp, key, n.key); |
1011 |
< |
if ((c == 0 && (rel & EQ) != 0) || |
1012 |
< |
(c < 0 && (rel & LT) == 0)) |
1013 |
< |
return n; |
1014 |
< |
if ( c <= 0 && (rel & LT) != 0) |
1015 |
< |
return b.isBaseHeader() ? null : b; |
1016 |
< |
b = n; |
1017 |
< |
n = f; |
1010 |
> |
else if (n.val == null) |
1011 |
> |
unlinkNode(b, n); |
1012 |
> |
else if (((c = cpr(cmp, key, k)) == 0 && (rel & EQ) != 0) || |
1013 |
> |
(c < 0 && (rel & LT) == 0)) { |
1014 |
> |
result = n; |
1015 |
> |
break outer; |
1016 |
> |
} |
1017 |
> |
else if (c <= 0 && (rel & LT) != 0) { |
1018 |
> |
result = (b.key != null) ? b : null; |
1019 |
> |
break outer; |
1020 |
> |
} |
1021 |
> |
else |
1022 |
> |
b = n; |
1023 |
|
} |
1024 |
|
} |
1025 |
+ |
return result; |
1026 |
|
} |
1027 |
|
|
1028 |
|
/** |
1029 |
< |
* Returns SimpleImmutableEntry for results of findNear. |
1029 |
> |
* Variant of findNear returning SimpleImmutableEntry |
1030 |
|
* @param key the key |
1031 |
|
* @param rel the relation -- OR'ed combination of EQ, LT, GT |
1032 |
|
* @return Entry fitting relation, or null if no such |
1033 |
|
*/ |
1034 |
< |
final AbstractMap.SimpleImmutableEntry<K,V> getNear(K key, int rel) { |
1035 |
< |
Comparator<? super K> cmp = comparator; |
1034 |
> |
final AbstractMap.SimpleImmutableEntry<K,V> findNearEntry(K key, int rel, |
1035 |
> |
Comparator<? super K> cmp) { |
1036 |
|
for (;;) { |
1037 |
< |
Node<K,V> n = findNear(key, rel, cmp); |
1038 |
< |
if (n == null) |
1037 |
> |
Node<K,V> n; V v; |
1038 |
> |
if ((n = findNear(key, rel, cmp)) == null) |
1039 |
|
return null; |
1040 |
< |
AbstractMap.SimpleImmutableEntry<K,V> e = n.createSnapshot(); |
1041 |
< |
if (e != null) |
1238 |
< |
return e; |
1040 |
> |
if ((v = n.val) != null) |
1041 |
> |
return new AbstractMap.SimpleImmutableEntry<K,V>(n.key, v); |
1042 |
|
} |
1043 |
|
} |
1044 |
|
|
1050 |
|
*/ |
1051 |
|
public ConcurrentSkipListMap() { |
1052 |
|
this.comparator = null; |
1250 |
– |
initialize(); |
1053 |
|
} |
1054 |
|
|
1055 |
|
/** |
1062 |
|
*/ |
1063 |
|
public ConcurrentSkipListMap(Comparator<? super K> comparator) { |
1064 |
|
this.comparator = comparator; |
1263 |
– |
initialize(); |
1065 |
|
} |
1066 |
|
|
1067 |
|
/** |
1077 |
|
*/ |
1078 |
|
public ConcurrentSkipListMap(Map<? extends K, ? extends V> m) { |
1079 |
|
this.comparator = null; |
1279 |
– |
initialize(); |
1080 |
|
putAll(m); |
1081 |
|
} |
1082 |
|
|
1091 |
|
*/ |
1092 |
|
public ConcurrentSkipListMap(SortedMap<K, ? extends V> m) { |
1093 |
|
this.comparator = m.comparator(); |
1094 |
< |
initialize(); |
1295 |
< |
buildFromSorted(m); |
1094 |
> |
buildFromSorted(m); // initializes transients |
1095 |
|
} |
1096 |
|
|
1097 |
|
/** |
1105 |
|
@SuppressWarnings("unchecked") |
1106 |
|
ConcurrentSkipListMap<K,V> clone = |
1107 |
|
(ConcurrentSkipListMap<K,V>) super.clone(); |
1108 |
< |
clone.initialize(); |
1108 |
> |
clone.keySet = null; |
1109 |
> |
clone.entrySet = null; |
1110 |
> |
clone.values = null; |
1111 |
> |
clone.descendingMap = null; |
1112 |
|
clone.buildFromSorted(this); |
1113 |
|
return clone; |
1114 |
|
} catch (CloneNotSupportedException e) { |
1124 |
|
private void buildFromSorted(SortedMap<K, ? extends V> map) { |
1125 |
|
if (map == null) |
1126 |
|
throw new NullPointerException(); |
1325 |
– |
|
1326 |
– |
HeadIndex<K,V> h = head; |
1327 |
– |
Node<K,V> basepred = h.node; |
1328 |
– |
|
1329 |
– |
// Track the current rightmost node at each level. Uses an |
1330 |
– |
// ArrayList to avoid committing to initial or maximum level. |
1331 |
– |
ArrayList<Index<K,V>> preds = new ArrayList<>(); |
1332 |
– |
|
1333 |
– |
// initialize |
1334 |
– |
for (int i = 0; i <= h.level; ++i) |
1335 |
– |
preds.add(null); |
1336 |
– |
Index<K,V> q = h; |
1337 |
– |
for (int i = h.level; i > 0; --i) { |
1338 |
– |
preds.set(i, q); |
1339 |
– |
q = q.down; |
1340 |
– |
} |
1341 |
– |
|
1127 |
|
Iterator<? extends Map.Entry<? extends K, ? extends V>> it = |
1128 |
|
map.entrySet().iterator(); |
1129 |
+ |
|
1130 |
+ |
/* |
1131 |
+ |
* Add equally spaced indices at log intervals, using the bits |
1132 |
+ |
* of count during insertion. The maximum possible resulting |
1133 |
+ |
* level is less than the number of bits in a long (64). The |
1134 |
+ |
* preds array tracks the current rightmost node at each |
1135 |
+ |
* level. |
1136 |
+ |
*/ |
1137 |
+ |
@SuppressWarnings("unchecked") |
1138 |
+ |
Index<K,V>[] preds = (Index<K,V>[])new Index<?,?>[64]; |
1139 |
+ |
Node<K,V> bp = new Node<K,V>(null, null, null); |
1140 |
+ |
Index<K,V> h = preds[0] = new Index<K,V>(bp, null, null); |
1141 |
+ |
long count = 0; |
1142 |
+ |
|
1143 |
|
while (it.hasNext()) { |
1144 |
|
Map.Entry<? extends K, ? extends V> e = it.next(); |
1346 |
– |
int rnd = ThreadLocalRandom.current().nextInt(); |
1347 |
– |
int j = 0; |
1348 |
– |
if ((rnd & 0x80000001) == 0) { |
1349 |
– |
do { |
1350 |
– |
++j; |
1351 |
– |
} while (((rnd >>>= 1) & 1) != 0); |
1352 |
– |
if (j > h.level) j = h.level + 1; |
1353 |
– |
} |
1145 |
|
K k = e.getKey(); |
1146 |
|
V v = e.getValue(); |
1147 |
|
if (k == null || v == null) |
1148 |
|
throw new NullPointerException(); |
1149 |
|
Node<K,V> z = new Node<K,V>(k, v, null); |
1150 |
< |
basepred.next = z; |
1151 |
< |
basepred = z; |
1152 |
< |
if (j > 0) { |
1153 |
< |
Index<K,V> idx = null; |
1154 |
< |
for (int i = 1; i <= j; ++i) { |
1150 |
> |
bp = bp.next = z; |
1151 |
> |
if ((++count & 3L) == 0L) { |
1152 |
> |
long m = count >>> 2; |
1153 |
> |
int i = 0; |
1154 |
> |
Index<K,V> idx = null, q; |
1155 |
> |
do { |
1156 |
|
idx = new Index<K,V>(z, idx, null); |
1157 |
< |
if (i > h.level) |
1158 |
< |
h = new HeadIndex<K,V>(h.node, h, idx, i); |
1159 |
< |
|
1160 |
< |
if (i < preds.size()) { |
1161 |
< |
preds.get(i).right = idx; |
1370 |
< |
preds.set(i, idx); |
1371 |
< |
} else |
1372 |
< |
preds.add(idx); |
1373 |
< |
} |
1157 |
> |
if ((q = preds[i]) == null) |
1158 |
> |
preds[i] = h = new Index<K,V>(h.node, h, idx); |
1159 |
> |
else |
1160 |
> |
preds[i] = q.right = idx; |
1161 |
> |
} while (++i < preds.length && ((m >>>= 1) & 1L) != 0L); |
1162 |
|
} |
1163 |
|
} |
1164 |
< |
head = h; |
1164 |
> |
if (count != 0L) { |
1165 |
> |
VarHandle.releaseFence(); // emulate volatile stores |
1166 |
> |
addCount(count); |
1167 |
> |
head = h; |
1168 |
> |
VarHandle.fullFence(); |
1169 |
> |
} |
1170 |
|
} |
1171 |
|
|
1172 |
|
/* ---------------- Serialization -------------- */ |
1188 |
|
s.defaultWriteObject(); |
1189 |
|
|
1190 |
|
// Write out keys and values (alternating) |
1191 |
< |
for (Node<K,V> n = findFirst(); n != null; n = n.next) { |
1192 |
< |
V v = n.getValidValue(); |
1193 |
< |
if (v != null) { |
1194 |
< |
s.writeObject(n.key); |
1195 |
< |
s.writeObject(v); |
1191 |
> |
Node<K,V> b, n; V v; |
1192 |
> |
if ((b = baseHead()) != null) { |
1193 |
> |
while ((n = b.next) != null) { |
1194 |
> |
if ((v = n.val) != null) { |
1195 |
> |
s.writeObject(n.key); |
1196 |
> |
s.writeObject(v); |
1197 |
> |
} |
1198 |
> |
b = n; |
1199 |
|
} |
1200 |
|
} |
1201 |
|
s.writeObject(null); |
1213 |
|
throws java.io.IOException, ClassNotFoundException { |
1214 |
|
// Read in the Comparator and any hidden stuff |
1215 |
|
s.defaultReadObject(); |
1420 |
– |
// Reset transients |
1421 |
– |
initialize(); |
1422 |
– |
|
1423 |
– |
/* |
1424 |
– |
* This is nearly identical to buildFromSorted, but is |
1425 |
– |
* distinct because readObject calls can't be nicely adapted |
1426 |
– |
* as the kind of iterator needed by buildFromSorted. (They |
1427 |
– |
* can be, but doing so requires type cheats and/or creation |
1428 |
– |
* of adapter classes.) It is simpler to just adapt the code. |
1429 |
– |
*/ |
1216 |
|
|
1217 |
< |
HeadIndex<K,V> h = head; |
1218 |
< |
Node<K,V> basepred = h.node; |
1219 |
< |
ArrayList<Index<K,V>> preds = new ArrayList<>(); |
1220 |
< |
for (int i = 0; i <= h.level; ++i) |
1221 |
< |
preds.add(null); |
1222 |
< |
Index<K,V> q = h; |
1223 |
< |
for (int i = h.level; i > 0; --i) { |
1224 |
< |
preds.set(i, q); |
1439 |
< |
q = q.down; |
1440 |
< |
} |
1217 |
> |
// Same idea as buildFromSorted |
1218 |
> |
@SuppressWarnings("unchecked") |
1219 |
> |
Index<K,V>[] preds = (Index<K,V>[])new Index<?,?>[64]; |
1220 |
> |
Node<K,V> bp = new Node<K,V>(null, null, null); |
1221 |
> |
Index<K,V> h = preds[0] = new Index<K,V>(bp, null, null); |
1222 |
> |
Comparator<? super K> cmp = comparator; |
1223 |
> |
K prevKey = null; |
1224 |
> |
long count = 0; |
1225 |
|
|
1226 |
|
for (;;) { |
1227 |
< |
Object k = s.readObject(); |
1227 |
> |
K k = (K)s.readObject(); |
1228 |
|
if (k == null) |
1229 |
|
break; |
1230 |
< |
Object v = s.readObject(); |
1230 |
> |
V v = (V)s.readObject(); |
1231 |
|
if (v == null) |
1232 |
|
throw new NullPointerException(); |
1233 |
< |
K key = (K) k; |
1234 |
< |
V val = (V) v; |
1235 |
< |
int rnd = ThreadLocalRandom.current().nextInt(); |
1236 |
< |
int j = 0; |
1237 |
< |
if ((rnd & 0x80000001) == 0) { |
1233 |
> |
if (prevKey != null && cpr(cmp, prevKey, k) > 0) |
1234 |
> |
throw new IllegalStateException("out of order"); |
1235 |
> |
prevKey = k; |
1236 |
> |
Node<K,V> z = new Node<K,V>(k, v, null); |
1237 |
> |
bp = bp.next = z; |
1238 |
> |
if ((++count & 3L) == 0L) { |
1239 |
> |
long m = count >>> 2; |
1240 |
> |
int i = 0; |
1241 |
> |
Index<K,V> idx = null, q; |
1242 |
|
do { |
1455 |
– |
++j; |
1456 |
– |
} while (((rnd >>>= 1) & 1) != 0); |
1457 |
– |
if (j > h.level) j = h.level + 1; |
1458 |
– |
} |
1459 |
– |
Node<K,V> z = new Node<K,V>(key, val, null); |
1460 |
– |
basepred.next = z; |
1461 |
– |
basepred = z; |
1462 |
– |
if (j > 0) { |
1463 |
– |
Index<K,V> idx = null; |
1464 |
– |
for (int i = 1; i <= j; ++i) { |
1243 |
|
idx = new Index<K,V>(z, idx, null); |
1244 |
< |
if (i > h.level) |
1245 |
< |
h = new HeadIndex<K,V>(h.node, h, idx, i); |
1246 |
< |
|
1247 |
< |
if (i < preds.size()) { |
1248 |
< |
preds.get(i).right = idx; |
1471 |
< |
preds.set(i, idx); |
1472 |
< |
} else |
1473 |
< |
preds.add(idx); |
1474 |
< |
} |
1244 |
> |
if ((q = preds[i]) == null) |
1245 |
> |
preds[i] = h = new Index<K,V>(h.node, h, idx); |
1246 |
> |
else |
1247 |
> |
preds[i] = q.right = idx; |
1248 |
> |
} while (++i < preds.length && ((m >>>= 1) & 1L) != 0L); |
1249 |
|
} |
1250 |
|
} |
1251 |
< |
head = h; |
1251 |
> |
if (count != 0L) { |
1252 |
> |
VarHandle.releaseFence(); |
1253 |
> |
addCount(count); |
1254 |
> |
head = h; |
1255 |
> |
VarHandle.fullFence(); |
1256 |
> |
} |
1257 |
|
} |
1258 |
|
|
1259 |
|
/* ------ Map API methods ------ */ |
1354 |
|
public boolean containsValue(Object value) { |
1355 |
|
if (value == null) |
1356 |
|
throw new NullPointerException(); |
1357 |
< |
for (Node<K,V> n = findFirst(); n != null; n = n.next) { |
1358 |
< |
V v = n.getValidValue(); |
1359 |
< |
if (v != null && value.equals(v)) |
1360 |
< |
return true; |
1357 |
> |
Node<K,V> b, n; V v; |
1358 |
> |
if ((b = baseHead()) != null) { |
1359 |
> |
while ((n = b.next) != null) { |
1360 |
> |
if ((v = n.val) != null && value.equals(v)) |
1361 |
> |
return true; |
1362 |
> |
else |
1363 |
> |
b = n; |
1364 |
> |
} |
1365 |
|
} |
1366 |
|
return false; |
1367 |
|
} |
1368 |
|
|
1369 |
|
/** |
1370 |
< |
* Returns the number of key-value mappings in this map. If this map |
1588 |
< |
* contains more than {@code Integer.MAX_VALUE} elements, it |
1589 |
< |
* returns {@code Integer.MAX_VALUE}. |
1590 |
< |
* |
1591 |
< |
* <p>Beware that, unlike in most collections, this method is |
1592 |
< |
* <em>NOT</em> a constant-time operation. Because of the |
1593 |
< |
* asynchronous nature of these maps, determining the current |
1594 |
< |
* number of elements requires traversing them all to count them. |
1595 |
< |
* Additionally, it is possible for the size to change during |
1596 |
< |
* execution of this method, in which case the returned result |
1597 |
< |
* will be inaccurate. Thus, this method is typically not very |
1598 |
< |
* useful in concurrent applications. |
1599 |
< |
* |
1600 |
< |
* @return the number of elements in this map |
1370 |
> |
* {@inheritDoc} |
1371 |
|
*/ |
1372 |
|
public int size() { |
1373 |
< |
long count = 0; |
1374 |
< |
for (Node<K,V> n = findFirst(); n != null; n = n.next) { |
1375 |
< |
if (n.getValidValue() != null) |
1376 |
< |
++count; |
1607 |
< |
} |
1608 |
< |
return (count >= Integer.MAX_VALUE) ? Integer.MAX_VALUE : (int) count; |
1373 |
> |
long c; |
1374 |
> |
return ((baseHead() == null) ? 0 : |
1375 |
> |
((c = getAdderCount()) >= Integer.MAX_VALUE) ? |
1376 |
> |
Integer.MAX_VALUE : (int) c); |
1377 |
|
} |
1378 |
|
|
1379 |
|
/** |
1380 |
< |
* Returns {@code true} if this map contains no key-value mappings. |
1613 |
< |
* @return {@code true} if this map contains no key-value mappings |
1380 |
> |
* {@inheritDoc} |
1381 |
|
*/ |
1382 |
|
public boolean isEmpty() { |
1383 |
|
return findFirst() == null; |
1387 |
|
* Removes all of the mappings from this map. |
1388 |
|
*/ |
1389 |
|
public void clear() { |
1390 |
< |
for (;;) { |
1391 |
< |
Node<K,V> b, n; |
1392 |
< |
HeadIndex<K,V> h = head, d = (HeadIndex<K,V>)h.down; |
1393 |
< |
if (d != null) |
1394 |
< |
casHead(h, d); // remove levels |
1395 |
< |
else if ((b = h.node) != null && (n = b.next) != null) { |
1396 |
< |
Node<K,V> f = n.next; // remove values |
1397 |
< |
if (n == b.next) { |
1398 |
< |
Object v = n.value; |
1399 |
< |
if (v == null) |
1400 |
< |
n.helpDelete(b, f); |
1401 |
< |
else if (n.casValue(v, null) && n.appendMarker(f)) |
1402 |
< |
b.casNext(n, f); |
1390 |
> |
Index<K,V> h, r, d; Node<K,V> b; |
1391 |
> |
VarHandle.acquireFence(); |
1392 |
> |
while ((h = head) != null) { |
1393 |
> |
if ((r = h.right) != null) { // remove indices |
1394 |
> |
boolean cas = RIGHT.compareAndSet(h, r, null); |
1395 |
> |
} |
1396 |
> |
else if ((d = h.down) != null) { // remove levels |
1397 |
> |
boolean cas = HEAD.compareAndSet(this, h, d); |
1398 |
> |
} |
1399 |
> |
else { |
1400 |
> |
long count = 0L; |
1401 |
> |
if ((b = h.node) != null) { // remove nodes |
1402 |
> |
Node<K,V> n; V v; |
1403 |
> |
while ((n = b.next) != null) { |
1404 |
> |
if ((v = n.val) != null && |
1405 |
> |
VAL.compareAndSet(n, v, null)) { |
1406 |
> |
--count; |
1407 |
> |
v = null; |
1408 |
> |
} |
1409 |
> |
if (v == null) |
1410 |
> |
unlinkNode(b, n); |
1411 |
> |
} |
1412 |
|
} |
1413 |
+ |
if (count != 0L) |
1414 |
+ |
addCount(count); |
1415 |
+ |
else |
1416 |
+ |
break; |
1417 |
|
} |
1638 |
– |
else |
1639 |
– |
break; |
1418 |
|
} |
1419 |
|
} |
1420 |
|
|
1461 |
|
BiFunction<? super K, ? super V, ? extends V> remappingFunction) { |
1462 |
|
if (key == null || remappingFunction == null) |
1463 |
|
throw new NullPointerException(); |
1464 |
< |
Node<K,V> n; Object v; |
1464 |
> |
Node<K,V> n; V v; |
1465 |
|
while ((n = findNode(key)) != null) { |
1466 |
< |
if ((v = n.value) != null) { |
1467 |
< |
@SuppressWarnings("unchecked") V vv = (V) v; |
1690 |
< |
V r = remappingFunction.apply(key, vv); |
1466 |
> |
if ((v = n.val) != null) { |
1467 |
> |
V r = remappingFunction.apply(key, v); |
1468 |
|
if (r != null) { |
1469 |
< |
if (n.casValue(vv, r)) |
1469 |
> |
if (VAL.compareAndSet(n, v, r)) |
1470 |
|
return r; |
1471 |
|
} |
1472 |
< |
else if (doRemove(key, vv) != null) |
1472 |
> |
else if (doRemove(key, v) != null) |
1473 |
|
break; |
1474 |
|
} |
1475 |
|
} |
1494 |
|
if (key == null || remappingFunction == null) |
1495 |
|
throw new NullPointerException(); |
1496 |
|
for (;;) { |
1497 |
< |
Node<K,V> n; Object v; V r; |
1497 |
> |
Node<K,V> n; V v; V r; |
1498 |
|
if ((n = findNode(key)) == null) { |
1499 |
|
if ((r = remappingFunction.apply(key, null)) == null) |
1500 |
|
break; |
1501 |
|
if (doPut(key, r, true) == null) |
1502 |
|
return r; |
1503 |
|
} |
1504 |
< |
else if ((v = n.value) != null) { |
1505 |
< |
@SuppressWarnings("unchecked") V vv = (V) v; |
1506 |
< |
if ((r = remappingFunction.apply(key, vv)) != null) { |
1730 |
< |
if (n.casValue(vv, r)) |
1504 |
> |
else if ((v = n.val) != null) { |
1505 |
> |
if ((r = remappingFunction.apply(key, v)) != null) { |
1506 |
> |
if (VAL.compareAndSet(n, v, r)) |
1507 |
|
return r; |
1508 |
|
} |
1509 |
< |
else if (doRemove(key, vv) != null) |
1509 |
> |
else if (doRemove(key, v) != null) |
1510 |
|
break; |
1511 |
|
} |
1512 |
|
} |
1533 |
|
if (key == null || value == null || remappingFunction == null) |
1534 |
|
throw new NullPointerException(); |
1535 |
|
for (;;) { |
1536 |
< |
Node<K,V> n; Object v; V r; |
1536 |
> |
Node<K,V> n; V v; V r; |
1537 |
|
if ((n = findNode(key)) == null) { |
1538 |
|
if (doPut(key, value, true) == null) |
1539 |
|
return value; |
1540 |
|
} |
1541 |
< |
else if ((v = n.value) != null) { |
1542 |
< |
@SuppressWarnings("unchecked") V vv = (V) v; |
1543 |
< |
if ((r = remappingFunction.apply(vv, value)) != null) { |
1768 |
< |
if (n.casValue(vv, r)) |
1541 |
> |
else if ((v = n.val) != null) { |
1542 |
> |
if ((r = remappingFunction.apply(v, value)) != null) { |
1543 |
> |
if (VAL.compareAndSet(n, v, r)) |
1544 |
|
return r; |
1545 |
|
} |
1546 |
< |
else if (doRemove(key, vv) != null) |
1546 |
> |
else if (doRemove(key, v) != null) |
1547 |
|
return null; |
1548 |
|
} |
1549 |
|
} |
1692 |
|
return false; |
1693 |
|
Map<?,?> m = (Map<?,?>) o; |
1694 |
|
try { |
1695 |
< |
for (Map.Entry<K,V> e : this.entrySet()) |
1696 |
< |
if (! e.getValue().equals(m.get(e.getKey()))) |
1697 |
< |
return false; |
1698 |
< |
for (Map.Entry<?,?> e : m.entrySet()) { |
1699 |
< |
Object k = e.getKey(); |
1700 |
< |
Object v = e.getValue(); |
1701 |
< |
if (k == null || v == null || !v.equals(get(k))) |
1702 |
< |
return false; |
1695 |
> |
@SuppressWarnings("unchecked") |
1696 |
> |
Iterator<Map.Entry<?,?>> it = |
1697 |
> |
(Iterator<Map.Entry<?,?>>)m.entrySet().iterator(); |
1698 |
> |
if (m instanceof SortedMap && |
1699 |
> |
((SortedMap<?,?>)m).comparator() == comparator) { |
1700 |
> |
Node<K,V> b, n; |
1701 |
> |
if ((b = baseHead()) != null) { |
1702 |
> |
while ((n = b.next) != null) { |
1703 |
> |
K k; V v; |
1704 |
> |
if ((v = n.val) != null && (k = n.key) != null) { |
1705 |
> |
if (!it.hasNext()) |
1706 |
> |
return false; |
1707 |
> |
Map.Entry<?,?> e = it.next(); |
1708 |
> |
Object mk = e.getKey(); |
1709 |
> |
Object mv = e.getValue(); |
1710 |
> |
if (mk == null || mv == null || |
1711 |
> |
!mk.equals(k) || !mv.equals(v)) |
1712 |
> |
return false; |
1713 |
> |
} |
1714 |
> |
b = n; |
1715 |
> |
} |
1716 |
> |
} |
1717 |
> |
return !it.hasNext(); |
1718 |
> |
} |
1719 |
> |
else { |
1720 |
> |
while (it.hasNext()) { |
1721 |
> |
V v; |
1722 |
> |
Map.Entry<?,?> e = it.next(); |
1723 |
> |
Object mk = e.getKey(); |
1724 |
> |
Object mv = e.getValue(); |
1725 |
> |
if (mk == null || mv == null || |
1726 |
> |
(v = get(mk)) == null || !v.equals(mv)) |
1727 |
> |
return false; |
1728 |
> |
} |
1729 |
> |
Node<K,V> b, n; |
1730 |
> |
if ((b = baseHead()) != null) { |
1731 |
> |
K k; V v; Object mv; |
1732 |
> |
while ((n = b.next) != null) { |
1733 |
> |
if ((v = n.val) != null && (k = n.key) != null && |
1734 |
> |
((mv = m.get(k)) == null || !mv.equals(v))) |
1735 |
> |
return false; |
1736 |
> |
b = n; |
1737 |
> |
} |
1738 |
> |
} |
1739 |
> |
return true; |
1740 |
|
} |
1929 |
– |
return true; |
1741 |
|
} catch (ClassCastException unused) { |
1742 |
|
return false; |
1743 |
|
} catch (NullPointerException unused) { |
1786 |
|
if (key == null || oldValue == null || newValue == null) |
1787 |
|
throw new NullPointerException(); |
1788 |
|
for (;;) { |
1789 |
< |
Node<K,V> n; Object v; |
1789 |
> |
Node<K,V> n; V v; |
1790 |
|
if ((n = findNode(key)) == null) |
1791 |
|
return false; |
1792 |
< |
if ((v = n.value) != null) { |
1792 |
> |
if ((v = n.val) != null) { |
1793 |
|
if (!oldValue.equals(v)) |
1794 |
|
return false; |
1795 |
< |
if (n.casValue(v, newValue)) |
1795 |
> |
if (VAL.compareAndSet(n, v, newValue)) |
1796 |
|
return true; |
1797 |
|
} |
1798 |
|
} |
1811 |
|
if (key == null || value == null) |
1812 |
|
throw new NullPointerException(); |
1813 |
|
for (;;) { |
1814 |
< |
Node<K,V> n; Object v; |
1814 |
> |
Node<K,V> n; V v; |
1815 |
|
if ((n = findNode(key)) == null) |
1816 |
|
return null; |
1817 |
< |
if ((v = n.value) != null && n.casValue(v, value)) { |
1818 |
< |
@SuppressWarnings("unchecked") V vv = (V)v; |
2008 |
< |
return vv; |
2009 |
< |
} |
1817 |
> |
if ((v = n.val) != null && VAL.compareAndSet(n, v, value)) |
1818 |
> |
return v; |
1819 |
|
} |
1820 |
|
} |
1821 |
|
|
1925 |
|
* @throws NullPointerException if the specified key is null |
1926 |
|
*/ |
1927 |
|
public Map.Entry<K,V> lowerEntry(K key) { |
1928 |
< |
return getNear(key, LT); |
1928 |
> |
return findNearEntry(key, LT, comparator); |
1929 |
|
} |
1930 |
|
|
1931 |
|
/** |
1948 |
|
* @throws NullPointerException if the specified key is null |
1949 |
|
*/ |
1950 |
|
public Map.Entry<K,V> floorEntry(K key) { |
1951 |
< |
return getNear(key, LT|EQ); |
1951 |
> |
return findNearEntry(key, LT|EQ, comparator); |
1952 |
|
} |
1953 |
|
|
1954 |
|
/** |
1971 |
|
* @throws NullPointerException if the specified key is null |
1972 |
|
*/ |
1973 |
|
public Map.Entry<K,V> ceilingEntry(K key) { |
1974 |
< |
return getNear(key, GT|EQ); |
1974 |
> |
return findNearEntry(key, GT|EQ, comparator); |
1975 |
|
} |
1976 |
|
|
1977 |
|
/** |
1994 |
|
* @throws NullPointerException if the specified key is null |
1995 |
|
*/ |
1996 |
|
public Map.Entry<K,V> higherEntry(K key) { |
1997 |
< |
return getNear(key, GT); |
1997 |
> |
return findNearEntry(key, GT, comparator); |
1998 |
|
} |
1999 |
|
|
2000 |
|
/** |
2014 |
|
* the {@code Entry.setValue} method. |
2015 |
|
*/ |
2016 |
|
public Map.Entry<K,V> firstEntry() { |
2017 |
< |
for (;;) { |
2209 |
< |
Node<K,V> n = findFirst(); |
2210 |
< |
if (n == null) |
2211 |
< |
return null; |
2212 |
< |
AbstractMap.SimpleImmutableEntry<K,V> e = n.createSnapshot(); |
2213 |
< |
if (e != null) |
2214 |
< |
return e; |
2215 |
< |
} |
2017 |
> |
return findFirstEntry(); |
2018 |
|
} |
2019 |
|
|
2020 |
|
/** |
2024 |
|
* the {@code Entry.setValue} method. |
2025 |
|
*/ |
2026 |
|
public Map.Entry<K,V> lastEntry() { |
2027 |
< |
for (;;) { |
2226 |
< |
Node<K,V> n = findLast(); |
2227 |
< |
if (n == null) |
2228 |
< |
return null; |
2229 |
< |
AbstractMap.SimpleImmutableEntry<K,V> e = n.createSnapshot(); |
2230 |
< |
if (e != null) |
2231 |
< |
return e; |
2232 |
< |
} |
2027 |
> |
return findLastEntry(); |
2028 |
|
} |
2029 |
|
|
2030 |
|
/** |
2047 |
|
return doRemoveLastEntry(); |
2048 |
|
} |
2049 |
|
|
2255 |
– |
|
2050 |
|
/* ---------------- Iterators -------------- */ |
2051 |
|
|
2052 |
|
/** |
2053 |
< |
* Base of iterator classes: |
2053 |
> |
* Base of iterator classes |
2054 |
|
*/ |
2055 |
|
abstract class Iter<T> implements Iterator<T> { |
2056 |
|
/** the last node returned by next() */ |
2062 |
|
|
2063 |
|
/** Initializes ascending iterator for entire range. */ |
2064 |
|
Iter() { |
2065 |
< |
while ((next = findFirst()) != null) { |
2272 |
< |
Object x = next.value; |
2273 |
< |
if (x != null && x != next) { |
2274 |
< |
@SuppressWarnings("unchecked") V vv = (V)x; |
2275 |
< |
nextValue = vv; |
2276 |
< |
break; |
2277 |
< |
} |
2278 |
< |
} |
2065 |
> |
advance(baseHead()); |
2066 |
|
} |
2067 |
|
|
2068 |
|
public final boolean hasNext() { |
2070 |
|
} |
2071 |
|
|
2072 |
|
/** Advances next to higher entry. */ |
2073 |
< |
final void advance() { |
2074 |
< |
if (next == null) |
2075 |
< |
throw new NoSuchElementException(); |
2076 |
< |
lastReturned = next; |
2077 |
< |
while ((next = next.next) != null) { |
2078 |
< |
Object x = next.value; |
2079 |
< |
if (x != null && x != next) { |
2293 |
< |
@SuppressWarnings("unchecked") V vv = (V)x; |
2294 |
< |
nextValue = vv; |
2295 |
< |
break; |
2296 |
< |
} |
2297 |
< |
} |
2073 |
> |
final void advance(Node<K,V> b) { |
2074 |
> |
Node<K,V> n = null; |
2075 |
> |
V v = null; |
2076 |
> |
if ((lastReturned = b) != null) |
2077 |
> |
do {} while ((n = b.next) != null && (v = n.val) == null); |
2078 |
> |
nextValue = v; |
2079 |
> |
next = n; |
2080 |
|
} |
2081 |
|
|
2082 |
< |
public void remove() { |
2083 |
< |
Node<K,V> l = lastReturned; |
2084 |
< |
if (l == null) |
2082 |
> |
public final void remove() { |
2083 |
> |
Node<K,V> n; K k; |
2084 |
> |
if ((n = lastReturned) == null || (k = n.key) == null) |
2085 |
|
throw new IllegalStateException(); |
2086 |
|
// It would not be worth all of the overhead to directly |
2087 |
|
// unlink from here. Using remove is fast enough. |
2088 |
< |
ConcurrentSkipListMap.this.remove(l.key); |
2088 |
> |
ConcurrentSkipListMap.this.remove(k); |
2089 |
|
lastReturned = null; |
2090 |
|
} |
2309 |
– |
|
2091 |
|
} |
2092 |
|
|
2093 |
|
final class ValueIterator extends Iter<V> { |
2094 |
|
public V next() { |
2095 |
< |
V v = nextValue; |
2096 |
< |
advance(); |
2095 |
> |
V v; |
2096 |
> |
if ((v = nextValue) == null) |
2097 |
> |
throw new NoSuchElementException(); |
2098 |
> |
advance(next); |
2099 |
|
return v; |
2100 |
|
} |
2101 |
|
} |
2102 |
|
|
2103 |
|
final class KeyIterator extends Iter<K> { |
2104 |
|
public K next() { |
2105 |
< |
Node<K,V> n = next; |
2106 |
< |
advance(); |
2107 |
< |
return n.key; |
2105 |
> |
Node<K,V> n; |
2106 |
> |
if ((n = next) == null) |
2107 |
> |
throw new NoSuchElementException(); |
2108 |
> |
K k = n.key; |
2109 |
> |
advance(n); |
2110 |
> |
return k; |
2111 |
|
} |
2112 |
|
} |
2113 |
|
|
2114 |
|
final class EntryIterator extends Iter<Map.Entry<K,V>> { |
2115 |
|
public Map.Entry<K,V> next() { |
2116 |
< |
Node<K,V> n = next; |
2116 |
> |
Node<K,V> n; |
2117 |
> |
if ((n = next) == null) |
2118 |
> |
throw new NoSuchElementException(); |
2119 |
> |
K k = n.key; |
2120 |
|
V v = nextValue; |
2121 |
< |
advance(); |
2122 |
< |
return new AbstractMap.SimpleImmutableEntry<K,V>(n.key, v); |
2121 |
> |
advance(n); |
2122 |
> |
return new AbstractMap.SimpleImmutableEntry<K,V>(k, v); |
2123 |
|
} |
2124 |
|
} |
2125 |
|
|
2485 |
|
Map.Entry<K,V> lowestEntry() { |
2486 |
|
Comparator<? super K> cmp = m.comparator; |
2487 |
|
for (;;) { |
2488 |
< |
ConcurrentSkipListMap.Node<K,V> n = loNode(cmp); |
2489 |
< |
if (!isBeforeEnd(n, cmp)) |
2488 |
> |
ConcurrentSkipListMap.Node<K,V> n; V v; |
2489 |
> |
if ((n = loNode(cmp)) == null || !isBeforeEnd(n, cmp)) |
2490 |
|
return null; |
2491 |
< |
Map.Entry<K,V> e = n.createSnapshot(); |
2492 |
< |
if (e != null) |
2704 |
< |
return e; |
2491 |
> |
else if ((v = n.val) != null) |
2492 |
> |
return new AbstractMap.SimpleImmutableEntry<K,V>(n.key, v); |
2493 |
|
} |
2494 |
|
} |
2495 |
|
|
2496 |
|
Map.Entry<K,V> highestEntry() { |
2497 |
|
Comparator<? super K> cmp = m.comparator; |
2498 |
|
for (;;) { |
2499 |
< |
ConcurrentSkipListMap.Node<K,V> n = hiNode(cmp); |
2500 |
< |
if (n == null || !inBounds(n.key, cmp)) |
2499 |
> |
ConcurrentSkipListMap.Node<K,V> n; V v; |
2500 |
> |
if ((n = hiNode(cmp)) == null || !inBounds(n.key, cmp)) |
2501 |
|
return null; |
2502 |
< |
Map.Entry<K,V> e = n.createSnapshot(); |
2503 |
< |
if (e != null) |
2716 |
< |
return e; |
2502 |
> |
else if ((v = n.val) != null) |
2503 |
> |
return new AbstractMap.SimpleImmutableEntry<K,V>(n.key, v); |
2504 |
|
} |
2505 |
|
} |
2506 |
|
|
2507 |
|
Map.Entry<K,V> removeLowest() { |
2508 |
|
Comparator<? super K> cmp = m.comparator; |
2509 |
|
for (;;) { |
2510 |
< |
Node<K,V> n = loNode(cmp); |
2511 |
< |
if (n == null) |
2510 |
> |
ConcurrentSkipListMap.Node<K,V> n; K k; V v; |
2511 |
> |
if ((n = loNode(cmp)) == null) |
2512 |
|
return null; |
2513 |
< |
K k = n.key; |
2727 |
< |
if (!inBounds(k, cmp)) |
2513 |
> |
else if (!inBounds((k = n.key), cmp)) |
2514 |
|
return null; |
2515 |
< |
V v = m.doRemove(k, null); |
2730 |
< |
if (v != null) |
2515 |
> |
else if ((v = m.doRemove(k, null)) != null) |
2516 |
|
return new AbstractMap.SimpleImmutableEntry<K,V>(k, v); |
2517 |
|
} |
2518 |
|
} |
2520 |
|
Map.Entry<K,V> removeHighest() { |
2521 |
|
Comparator<? super K> cmp = m.comparator; |
2522 |
|
for (;;) { |
2523 |
< |
Node<K,V> n = hiNode(cmp); |
2524 |
< |
if (n == null) |
2523 |
> |
ConcurrentSkipListMap.Node<K,V> n; K k; V v; |
2524 |
> |
if ((n = hiNode(cmp)) == null) |
2525 |
|
return null; |
2526 |
< |
K k = n.key; |
2742 |
< |
if (!inBounds(k, cmp)) |
2526 |
> |
else if (!inBounds((k = n.key), cmp)) |
2527 |
|
return null; |
2528 |
< |
V v = m.doRemove(k, null); |
2745 |
< |
if (v != null) |
2528 |
> |
else if ((v = m.doRemove(k, null)) != null) |
2529 |
|
return new AbstractMap.SimpleImmutableEntry<K,V>(k, v); |
2530 |
|
} |
2531 |
|
} |
2532 |
|
|
2533 |
|
/** |
2534 |
< |
* Submap version of ConcurrentSkipListMap.getNearEntry. |
2534 |
> |
* Submap version of ConcurrentSkipListMap.findNearEntry. |
2535 |
|
*/ |
2536 |
|
Map.Entry<K,V> getNearEntry(K key, int rel) { |
2537 |
|
Comparator<? super K> cmp = m.comparator; |
2545 |
|
return ((rel & LT) != 0) ? null : lowestEntry(); |
2546 |
|
if (tooHigh(key, cmp)) |
2547 |
|
return ((rel & LT) != 0) ? highestEntry() : null; |
2548 |
< |
for (;;) { |
2549 |
< |
Node<K,V> n = m.findNear(key, rel, cmp); |
2550 |
< |
if (n == null || !inBounds(n.key, cmp)) |
2551 |
< |
return null; |
2552 |
< |
K k = n.key; |
2553 |
< |
V v = n.getValidValue(); |
2771 |
< |
if (v != null) |
2772 |
< |
return new AbstractMap.SimpleImmutableEntry<K,V>(k, v); |
2773 |
< |
} |
2548 |
> |
AbstractMap.SimpleImmutableEntry<K,V> e = |
2549 |
> |
m.findNearEntry(key, rel, cmp); |
2550 |
> |
if (e == null || !inBounds(e.getKey(), cmp)) |
2551 |
> |
return null; |
2552 |
> |
else |
2553 |
> |
return e; |
2554 |
|
} |
2555 |
|
|
2556 |
|
// Almost the same as getNearEntry, except for keys |
2585 |
|
Node<K,V> n = m.findNear(key, rel, cmp); |
2586 |
|
if (n == null || !inBounds(n.key, cmp)) |
2587 |
|
return null; |
2588 |
< |
K k = n.key; |
2589 |
< |
V v = n.getValidValue(); |
2810 |
< |
if (v != null) |
2811 |
< |
return k; |
2588 |
> |
if (n.val != null) |
2589 |
> |
return n.key; |
2590 |
|
} |
2591 |
|
} |
2592 |
|
|
2617 |
|
for (ConcurrentSkipListMap.Node<K,V> n = loNode(cmp); |
2618 |
|
isBeforeEnd(n, cmp); |
2619 |
|
n = n.next) { |
2620 |
< |
if (n.getValidValue() != null) |
2620 |
> |
if (n.val != null) |
2621 |
|
++count; |
2622 |
|
} |
2623 |
|
return count >= Integer.MAX_VALUE ? Integer.MAX_VALUE : (int)count; |
2635 |
|
for (ConcurrentSkipListMap.Node<K,V> n = loNode(cmp); |
2636 |
|
isBeforeEnd(n, cmp); |
2637 |
|
n = n.next) { |
2638 |
< |
V v = n.getValidValue(); |
2638 |
> |
V v = n.val; |
2639 |
|
if (v != null && value.equals(v)) |
2640 |
|
return true; |
2641 |
|
} |
2647 |
|
for (ConcurrentSkipListMap.Node<K,V> n = loNode(cmp); |
2648 |
|
isBeforeEnd(n, cmp); |
2649 |
|
n = n.next) { |
2650 |
< |
if (n.getValidValue() != null) |
2650 |
> |
if (n.val != null) |
2651 |
|
m.remove(n.key); |
2652 |
|
} |
2653 |
|
} |
2866 |
|
next = isDescending ? hiNode(cmp) : loNode(cmp); |
2867 |
|
if (next == null) |
2868 |
|
break; |
2869 |
< |
Object x = next.value; |
2870 |
< |
if (x != null && x != next) { |
2869 |
> |
V x = next.val; |
2870 |
> |
if (x != null) { |
2871 |
|
if (! inBounds(next.key, cmp)) |
2872 |
|
next = null; |
2873 |
< |
else { |
2874 |
< |
@SuppressWarnings("unchecked") V vv = (V)x; |
3097 |
< |
nextValue = vv; |
3098 |
< |
} |
2873 |
> |
else |
2874 |
> |
nextValue = x; |
2875 |
|
break; |
2876 |
|
} |
2877 |
|
} |
2897 |
|
next = next.next; |
2898 |
|
if (next == null) |
2899 |
|
break; |
2900 |
< |
Object x = next.value; |
2901 |
< |
if (x != null && x != next) { |
2900 |
> |
V x = next.val; |
2901 |
> |
if (x != null) { |
2902 |
|
if (tooHigh(next.key, cmp)) |
2903 |
|
next = null; |
2904 |
< |
else { |
2905 |
< |
@SuppressWarnings("unchecked") V vv = (V)x; |
3130 |
< |
nextValue = vv; |
3131 |
< |
} |
2904 |
> |
else |
2905 |
> |
nextValue = x; |
2906 |
|
break; |
2907 |
|
} |
2908 |
|
} |
2914 |
|
next = m.findNear(lastReturned.key, LT, cmp); |
2915 |
|
if (next == null) |
2916 |
|
break; |
2917 |
< |
Object x = next.value; |
2918 |
< |
if (x != null && x != next) { |
2917 |
> |
V x = next.val; |
2918 |
> |
if (x != null) { |
2919 |
|
if (tooLow(next.key, cmp)) |
2920 |
|
next = null; |
2921 |
< |
else { |
2922 |
< |
@SuppressWarnings("unchecked") V vv = (V)x; |
3149 |
< |
nextValue = vv; |
3150 |
< |
} |
2921 |
> |
else |
2922 |
> |
nextValue = x; |
2923 |
|
break; |
2924 |
|
} |
2925 |
|
} |
2999 |
|
|
3000 |
|
public void forEach(BiConsumer<? super K, ? super V> action) { |
3001 |
|
if (action == null) throw new NullPointerException(); |
3002 |
< |
V v; |
3003 |
< |
for (Node<K,V> n = findFirst(); n != null; n = n.next) { |
3004 |
< |
if ((v = n.getValidValue()) != null) |
3005 |
< |
action.accept(n.key, v); |
3002 |
> |
Node<K,V> b, n; V v; |
3003 |
> |
if ((b = baseHead()) != null) { |
3004 |
> |
while ((n = b.next) != null) { |
3005 |
> |
if ((v = n.val) != null) |
3006 |
> |
action.accept(n.key, v); |
3007 |
> |
b = n; |
3008 |
> |
} |
3009 |
|
} |
3010 |
|
} |
3011 |
|
|
3012 |
|
public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) { |
3013 |
|
if (function == null) throw new NullPointerException(); |
3014 |
< |
V v; |
3015 |
< |
for (Node<K,V> n = findFirst(); n != null; n = n.next) { |
3016 |
< |
while ((v = n.getValidValue()) != null) { |
3017 |
< |
V r = function.apply(n.key, v); |
3018 |
< |
if (r == null) throw new NullPointerException(); |
3019 |
< |
if (n.casValue(v, r)) |
3020 |
< |
break; |
3014 |
> |
Node<K,V> b, n; V v; |
3015 |
> |
if ((b = baseHead()) != null) { |
3016 |
> |
while ((n = b.next) != null) { |
3017 |
> |
while ((v = n.val) != null) { |
3018 |
> |
V r = function.apply(n.key, v); |
3019 |
> |
if (r == null) throw new NullPointerException(); |
3020 |
> |
if (VAL.compareAndSet(n, v, r)) |
3021 |
> |
break; |
3022 |
> |
} |
3023 |
> |
b = n; |
3024 |
|
} |
3025 |
|
} |
3026 |
|
} |
3031 |
|
boolean removeEntryIf(Predicate<? super Entry<K,V>> function) { |
3032 |
|
if (function == null) throw new NullPointerException(); |
3033 |
|
boolean removed = false; |
3034 |
< |
for (Node<K,V> n = findFirst(); n != null; n = n.next) { |
3035 |
< |
V v; |
3036 |
< |
if ((v = n.getValidValue()) != null) { |
3037 |
< |
K k = n.key; |
3038 |
< |
Map.Entry<K,V> e = new AbstractMap.SimpleImmutableEntry<>(k, v); |
3039 |
< |
if (function.test(e) && remove(k, v)) |
3040 |
< |
removed = true; |
3034 |
> |
Node<K,V> b, n; V v; |
3035 |
> |
if ((b = baseHead()) != null) { |
3036 |
> |
while ((n = b.next) != null) { |
3037 |
> |
if ((v = n.val) != null) { |
3038 |
> |
K k = n.key; |
3039 |
> |
Map.Entry<K,V> e = new AbstractMap.SimpleImmutableEntry<>(k, v); |
3040 |
> |
if (function.test(e) && remove(k, v)) |
3041 |
> |
removed = true; |
3042 |
> |
} |
3043 |
> |
b = n; |
3044 |
|
} |
3045 |
|
} |
3046 |
|
return removed; |
3052 |
|
boolean removeValueIf(Predicate<? super V> function) { |
3053 |
|
if (function == null) throw new NullPointerException(); |
3054 |
|
boolean removed = false; |
3055 |
< |
for (Node<K,V> n = findFirst(); n != null; n = n.next) { |
3056 |
< |
V v; |
3057 |
< |
if ((v = n.getValidValue()) != null) { |
3058 |
< |
K k = n.key; |
3278 |
< |
if (function.test(v) && remove(k, v)) |
3055 |
> |
Node<K,V> b, n; V v; |
3056 |
> |
if ((b = baseHead()) != null) { |
3057 |
> |
while ((n = b.next) != null) { |
3058 |
> |
if ((v = n.val) != null && function.test(v) && remove(n.key, v)) |
3059 |
|
removed = true; |
3060 |
+ |
b = n; |
3061 |
|
} |
3062 |
|
} |
3063 |
|
return removed; |
3075 |
|
* off, or the end of row is encountered. Control of the number of |
3076 |
|
* splits relies on some statistical estimation: The expected |
3077 |
|
* remaining number of elements of a skip list when advancing |
3078 |
< |
* either across or down decreases by about 25%. To make this |
3298 |
< |
* observation useful, we need to know initial size, which we |
3299 |
< |
* don't. But we can just use Integer.MAX_VALUE so that we |
3300 |
< |
* don't prematurely zero out while splitting. |
3078 |
> |
* either across or down decreases by about 25%. |
3079 |
|
*/ |
3080 |
|
abstract static class CSLMSpliterator<K,V> { |
3081 |
|
final Comparator<? super K> comparator; |
3082 |
|
final K fence; // exclusive upper bound for keys, or null if to end |
3083 |
|
Index<K,V> row; // the level to split out |
3084 |
|
Node<K,V> current; // current traversal node; initialize at origin |
3085 |
< |
int est; // pseudo-size estimate |
3085 |
> |
long est; // size estimate |
3086 |
|
CSLMSpliterator(Comparator<? super K> comparator, Index<K,V> row, |
3087 |
< |
Node<K,V> origin, K fence, int est) { |
3087 |
> |
Node<K,V> origin, K fence, long est) { |
3088 |
|
this.comparator = comparator; this.row = row; |
3089 |
|
this.current = origin; this.fence = fence; this.est = est; |
3090 |
|
} |
3091 |
|
|
3092 |
< |
public final long estimateSize() { return (long)est; } |
3092 |
> |
public final long estimateSize() { return est; } |
3093 |
|
} |
3094 |
|
|
3095 |
|
static final class KeySpliterator<K,V> extends CSLMSpliterator<K,V> |
3096 |
|
implements Spliterator<K> { |
3097 |
|
KeySpliterator(Comparator<? super K> comparator, Index<K,V> row, |
3098 |
< |
Node<K,V> origin, K fence, int est) { |
3098 |
> |
Node<K,V> origin, K fence, long est) { |
3099 |
|
super(comparator, row, origin, fence, est); |
3100 |
|
} |
3101 |
|
|
3107 |
|
for (Index<K,V> q = row; q != null; q = row = q.down) { |
3108 |
|
Index<K,V> s; Node<K,V> b, n; K sk; |
3109 |
|
if ((s = q.right) != null && (b = s.node) != null && |
3110 |
< |
(n = b.next) != null && n.value != null && |
3110 |
> |
(n = b.next) != null && n.val != null && |
3111 |
|
(sk = n.key) != null && cpr(cmp, sk, ek) > 0 && |
3112 |
|
(f == null || cpr(cmp, sk, f) < 0)) { |
3113 |
|
current = n; |
3128 |
|
Node<K,V> e = current; |
3129 |
|
current = null; |
3130 |
|
for (; e != null; e = e.next) { |
3131 |
< |
K k; Object v; |
3131 |
> |
K k; |
3132 |
|
if ((k = e.key) != null && f != null && cpr(cmp, f, k) <= 0) |
3133 |
|
break; |
3134 |
< |
if ((v = e.value) != null && v != e) |
3134 |
> |
if (e.val != null) |
3135 |
|
action.accept(k); |
3136 |
|
} |
3137 |
|
} |
3142 |
|
K f = fence; |
3143 |
|
Node<K,V> e = current; |
3144 |
|
for (; e != null; e = e.next) { |
3145 |
< |
K k; Object v; |
3145 |
> |
K k; |
3146 |
|
if ((k = e.key) != null && f != null && cpr(cmp, f, k) <= 0) { |
3147 |
|
e = null; |
3148 |
|
break; |
3149 |
|
} |
3150 |
< |
if ((v = e.value) != null && v != e) { |
3150 |
> |
if (e.val != null) { |
3151 |
|
current = e.next; |
3152 |
|
action.accept(k); |
3153 |
|
return true; |
3169 |
|
} |
3170 |
|
// factory method for KeySpliterator |
3171 |
|
final KeySpliterator<K,V> keySpliterator() { |
3172 |
< |
Comparator<? super K> cmp = comparator; |
3173 |
< |
for (;;) { // ensure h corresponds to origin p |
3174 |
< |
HeadIndex<K,V> h; Node<K,V> p; |
3175 |
< |
Node<K,V> b = (h = head).node; |
3176 |
< |
if ((p = b.next) == null || p.value != null) |
3177 |
< |
return new KeySpliterator<K,V>(cmp, h, p, null, (p == null) ? |
3178 |
< |
0 : Integer.MAX_VALUE); |
3179 |
< |
p.helpDelete(b, p.next); |
3172 |
> |
Index<K,V> h; Node<K,V> n; long est; |
3173 |
> |
VarHandle.acquireFence(); |
3174 |
> |
if ((h = head) == null) { |
3175 |
> |
n = null; |
3176 |
> |
est = 0L; |
3177 |
> |
} |
3178 |
> |
else { |
3179 |
> |
n = h.node; |
3180 |
> |
est = getAdderCount(); |
3181 |
|
} |
3182 |
+ |
return new KeySpliterator<K,V>(comparator, h, n, null, est); |
3183 |
|
} |
3184 |
|
|
3185 |
|
static final class ValueSpliterator<K,V> extends CSLMSpliterator<K,V> |
3186 |
|
implements Spliterator<V> { |
3187 |
|
ValueSpliterator(Comparator<? super K> comparator, Index<K,V> row, |
3188 |
< |
Node<K,V> origin, K fence, int est) { |
3188 |
> |
Node<K,V> origin, K fence, long est) { |
3189 |
|
super(comparator, row, origin, fence, est); |
3190 |
|
} |
3191 |
|
|
3197 |
|
for (Index<K,V> q = row; q != null; q = row = q.down) { |
3198 |
|
Index<K,V> s; Node<K,V> b, n; K sk; |
3199 |
|
if ((s = q.right) != null && (b = s.node) != null && |
3200 |
< |
(n = b.next) != null && n.value != null && |
3200 |
> |
(n = b.next) != null && n.val != null && |
3201 |
|
(sk = n.key) != null && cpr(cmp, sk, ek) > 0 && |
3202 |
|
(f == null || cpr(cmp, sk, f) < 0)) { |
3203 |
|
current = n; |
3218 |
|
Node<K,V> e = current; |
3219 |
|
current = null; |
3220 |
|
for (; e != null; e = e.next) { |
3221 |
< |
K k; Object v; |
3221 |
> |
K k; V v; |
3222 |
|
if ((k = e.key) != null && f != null && cpr(cmp, f, k) <= 0) |
3223 |
|
break; |
3224 |
< |
if ((v = e.value) != null && v != e) { |
3225 |
< |
@SuppressWarnings("unchecked") V vv = (V)v; |
3446 |
< |
action.accept(vv); |
3447 |
< |
} |
3224 |
> |
if ((v = e.val) != null) |
3225 |
> |
action.accept(v); |
3226 |
|
} |
3227 |
|
} |
3228 |
|
|
3232 |
|
K f = fence; |
3233 |
|
Node<K,V> e = current; |
3234 |
|
for (; e != null; e = e.next) { |
3235 |
< |
K k; Object v; |
3235 |
> |
K k; V v; |
3236 |
|
if ((k = e.key) != null && f != null && cpr(cmp, f, k) <= 0) { |
3237 |
|
e = null; |
3238 |
|
break; |
3239 |
|
} |
3240 |
< |
if ((v = e.value) != null && v != e) { |
3240 |
> |
if ((v = e.val) != null) { |
3241 |
|
current = e.next; |
3242 |
< |
@SuppressWarnings("unchecked") V vv = (V)v; |
3465 |
< |
action.accept(vv); |
3242 |
> |
action.accept(v); |
3243 |
|
return true; |
3244 |
|
} |
3245 |
|
} |
3255 |
|
|
3256 |
|
// Almost the same as keySpliterator() |
3257 |
|
final ValueSpliterator<K,V> valueSpliterator() { |
3258 |
< |
Comparator<? super K> cmp = comparator; |
3259 |
< |
for (;;) { |
3260 |
< |
HeadIndex<K,V> h; Node<K,V> p; |
3261 |
< |
Node<K,V> b = (h = head).node; |
3262 |
< |
if ((p = b.next) == null || p.value != null) |
3263 |
< |
return new ValueSpliterator<K,V>(cmp, h, p, null, (p == null) ? |
3264 |
< |
0 : Integer.MAX_VALUE); |
3265 |
< |
p.helpDelete(b, p.next); |
3258 |
> |
Index<K,V> h; Node<K,V> n; long est; |
3259 |
> |
VarHandle.acquireFence(); |
3260 |
> |
if ((h = head) == null) { |
3261 |
> |
n = null; |
3262 |
> |
est = 0L; |
3263 |
> |
} |
3264 |
> |
else { |
3265 |
> |
n = h.node; |
3266 |
> |
est = getAdderCount(); |
3267 |
|
} |
3268 |
+ |
return new ValueSpliterator<K,V>(comparator, h, n, null, est); |
3269 |
|
} |
3270 |
|
|
3271 |
|
static final class EntrySpliterator<K,V> extends CSLMSpliterator<K,V> |
3272 |
|
implements Spliterator<Map.Entry<K,V>> { |
3273 |
|
EntrySpliterator(Comparator<? super K> comparator, Index<K,V> row, |
3274 |
< |
Node<K,V> origin, K fence, int est) { |
3274 |
> |
Node<K,V> origin, K fence, long est) { |
3275 |
|
super(comparator, row, origin, fence, est); |
3276 |
|
} |
3277 |
|
|
3283 |
|
for (Index<K,V> q = row; q != null; q = row = q.down) { |
3284 |
|
Index<K,V> s; Node<K,V> b, n; K sk; |
3285 |
|
if ((s = q.right) != null && (b = s.node) != null && |
3286 |
< |
(n = b.next) != null && n.value != null && |
3286 |
> |
(n = b.next) != null && n.val != null && |
3287 |
|
(sk = n.key) != null && cpr(cmp, sk, ek) > 0 && |
3288 |
|
(f == null || cpr(cmp, sk, f) < 0)) { |
3289 |
|
current = n; |
3304 |
|
Node<K,V> e = current; |
3305 |
|
current = null; |
3306 |
|
for (; e != null; e = e.next) { |
3307 |
< |
K k; Object v; |
3307 |
> |
K k; V v; |
3308 |
|
if ((k = e.key) != null && f != null && cpr(cmp, f, k) <= 0) |
3309 |
|
break; |
3310 |
< |
if ((v = e.value) != null && v != e) { |
3532 |
< |
@SuppressWarnings("unchecked") V vv = (V)v; |
3310 |
> |
if ((v = e.val) != null) { |
3311 |
|
action.accept |
3312 |
< |
(new AbstractMap.SimpleImmutableEntry<K,V>(k, vv)); |
3312 |
> |
(new AbstractMap.SimpleImmutableEntry<K,V>(k, v)); |
3313 |
|
} |
3314 |
|
} |
3315 |
|
} |
3320 |
|
K f = fence; |
3321 |
|
Node<K,V> e = current; |
3322 |
|
for (; e != null; e = e.next) { |
3323 |
< |
K k; Object v; |
3323 |
> |
K k; V v; |
3324 |
|
if ((k = e.key) != null && f != null && cpr(cmp, f, k) <= 0) { |
3325 |
|
e = null; |
3326 |
|
break; |
3327 |
|
} |
3328 |
< |
if ((v = e.value) != null && v != e) { |
3328 |
> |
if ((v = e.val) != null) { |
3329 |
|
current = e.next; |
3552 |
– |
@SuppressWarnings("unchecked") V vv = (V)v; |
3330 |
|
action.accept |
3331 |
< |
(new AbstractMap.SimpleImmutableEntry<K,V>(k, vv)); |
3331 |
> |
(new AbstractMap.SimpleImmutableEntry<K,V>(k, v)); |
3332 |
|
return true; |
3333 |
|
} |
3334 |
|
} |
3359 |
|
|
3360 |
|
// Almost the same as keySpliterator() |
3361 |
|
final EntrySpliterator<K,V> entrySpliterator() { |
3362 |
< |
Comparator<? super K> cmp = comparator; |
3363 |
< |
for (;;) { // almost same as key version |
3364 |
< |
HeadIndex<K,V> h; Node<K,V> p; |
3365 |
< |
Node<K,V> b = (h = head).node; |
3366 |
< |
if ((p = b.next) == null || p.value != null) |
3367 |
< |
return new EntrySpliterator<K,V>(cmp, h, p, null, (p == null) ? |
3368 |
< |
0 : Integer.MAX_VALUE); |
3369 |
< |
p.helpDelete(b, p.next); |
3362 |
> |
Index<K,V> h; Node<K,V> n; long est; |
3363 |
> |
VarHandle.acquireFence(); |
3364 |
> |
if ((h = head) == null) { |
3365 |
> |
n = null; |
3366 |
> |
est = 0L; |
3367 |
> |
} |
3368 |
> |
else { |
3369 |
> |
n = h.node; |
3370 |
> |
est = getAdderCount(); |
3371 |
|
} |
3372 |
+ |
return new EntrySpliterator<K,V>(comparator, h, n, null, est); |
3373 |
|
} |
3374 |
|
|
3375 |
|
// VarHandle mechanics |
3376 |
|
private static final VarHandle HEAD; |
3377 |
+ |
private static final VarHandle ADDER; |
3378 |
+ |
private static final VarHandle NEXT; |
3379 |
+ |
private static final VarHandle VAL; |
3380 |
+ |
private static final VarHandle RIGHT; |
3381 |
|
static { |
3382 |
|
try { |
3383 |
|
MethodHandles.Lookup l = MethodHandles.lookup(); |
3384 |
|
HEAD = l.findVarHandle(ConcurrentSkipListMap.class, "head", |
3385 |
< |
HeadIndex.class); |
3385 |
> |
Index.class); |
3386 |
> |
ADDER = l.findVarHandle(ConcurrentSkipListMap.class, "adder", |
3387 |
> |
LongAdder.class); |
3388 |
> |
NEXT = l.findVarHandle(Node.class, "next", Node.class); |
3389 |
> |
VAL = l.findVarHandle(Node.class, "val", Object.class); |
3390 |
> |
RIGHT = l.findVarHandle(Index.class, "right", Index.class); |
3391 |
|
} catch (ReflectiveOperationException e) { |
3392 |
|
throw new Error(e); |
3393 |
|
} |