root/jsr166/jsr166/src/jsr166e/ConcurrentHashMapV8.java
Revision: 1.41
Committed: Tue Jul 3 23:25:57 2012 UTC (11 years, 10 months ago) by dl
Branch: MAIN
Changes since 1.40: +545 -462 lines
Log Message:
Support Spliterable; enable removal via compute methods

File Contents

# Content
1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/publicdomain/zero/1.0/
5 */
6
7 package jsr166e;
8 import jsr166e.LongAdder;
9 import java.util.Arrays;
10 import java.util.Map;
11 import java.util.Set;
12 import java.util.Collection;
13 import java.util.AbstractMap;
14 import java.util.AbstractSet;
15 import java.util.AbstractCollection;
16 import java.util.Hashtable;
17 import java.util.HashMap;
18 import java.util.Iterator;
19 import java.util.Enumeration;
20 import java.util.ConcurrentModificationException;
21 import java.util.NoSuchElementException;
22 import java.util.concurrent.ConcurrentMap;
23 import java.util.concurrent.ThreadLocalRandom;
24 import java.util.concurrent.locks.LockSupport;
25 import java.util.concurrent.locks.AbstractQueuedSynchronizer;
26 import java.io.Serializable;
27
28 /**
29 * A hash table supporting full concurrency of retrievals and
30 * high expected concurrency for updates. This class obeys the
31 * same functional specification as {@link java.util.Hashtable}, and
32 * includes versions of methods corresponding to each method of
33 * {@code Hashtable}. However, even though all operations are
34 * thread-safe, retrieval operations do <em>not</em> entail locking,
35 * and there is <em>not</em> any support for locking the entire table
36 * in a way that prevents all access. This class is fully
37 * interoperable with {@code Hashtable} in programs that rely on its
38 * thread safety but not on its synchronization details.
39 *
40 * <p> Retrieval operations (including {@code get}) generally do not
41 * block, so may overlap with update operations (including {@code put}
42 * and {@code remove}). Retrievals reflect the results of the most
43 * recently <em>completed</em> update operations holding upon their
44 * onset. For aggregate operations such as {@code putAll} and {@code
45 * clear}, concurrent retrievals may reflect insertion or removal of
46 * only some entries. Similarly, Iterators and Enumerations return
47 * elements reflecting the state of the hash table at some point at or
48 * since the creation of the iterator/enumeration. They do
49 * <em>not</em> throw {@link ConcurrentModificationException}.
50 * However, iterators are designed to be used by only one thread at a
51 * time. Bear in mind that the results of aggregate status methods
52 * including {@code size}, {@code isEmpty}, and {@code containsValue}
53 * are typically useful only when a map is not undergoing concurrent
54 * updates in other threads. Otherwise the results of these methods
55 * reflect transient states that may be adequate for monitoring
56 * or estimation purposes, but not for program control.
57 *
58 * <p> The table is dynamically expanded when there are too many
59 * collisions (i.e., keys that have distinct hash codes but fall into
60 * the same slot modulo the table size), with the expected average
61 * effect of maintaining roughly two bins per mapping (corresponding
62 * to a 0.75 load factor threshold for resizing). There may be much
63 * variance around this average as mappings are added and removed, but
64 * overall, this maintains a commonly accepted time/space tradeoff for
65 * hash tables. However, resizing this or any other kind of hash
66 * table may be a relatively slow operation. When possible, it is a
67 * good idea to provide a size estimate as an optional {@code
68 * initialCapacity} constructor argument. An additional optional
69 * {@code loadFactor} constructor argument provides a further means of
70 * customizing initial table capacity by specifying the table density
71 * to be used in calculating the amount of space to allocate for the
72 * given number of elements. Also, for compatibility with previous
73 * versions of this class, constructors may optionally specify an
74 * expected {@code concurrencyLevel} as an additional hint for
75 * internal sizing. Note that using many keys with exactly the same
76 * {@code hashCode()} is a sure way to slow down performance of any
77 * hash table.
78 *
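 * <p>Sample usage (an illustrative sketch only; the initial size
 * estimate of 64 and the key/value types are assumptions for the
 * example, not requirements):
 * <pre>
 * {@code ConcurrentHashMapV8<String, Long> map = new ConcurrentHashMapV8<String, Long>(64);
 * map.putIfAbsent("key", 1L);   // null keys and values are rejected
 * Long v = map.get("key");      // retrieval does not block
 * }</pre>
 *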
79 * <p>This class and its views and iterators implement all of the
80 * <em>optional</em> methods of the {@link Map} and {@link Iterator}
81 * interfaces.
82 *
83 * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class
84 * does <em>not</em> allow {@code null} to be used as a key or value.
85 *
86 * <p>This class is a member of the
87 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
88 * Java Collections Framework</a>.
89 *
90 * <p><em>jsr166e note: This class is a candidate replacement for
91 * java.util.concurrent.ConcurrentHashMap.</em>
92 *
93 * @since 1.5
94 * @author Doug Lea
95 * @param <K> the type of keys maintained by this map
96 * @param <V> the type of mapped values
97 */
98 public class ConcurrentHashMapV8<K, V>
99 implements ConcurrentMap<K, V>, Serializable {
100 private static final long serialVersionUID = 7249069246763182397L;
101
102 /**
103 * A function computing a mapping from the given key to a value.
104 * This is a place-holder for an upcoming JDK8 interface.
105 */
106 public static interface MappingFunction<K, V> {
107 /**
108 * Returns a value for the given key, or null if there is no mapping.
109 *
110 * @param key the (non-null) key
111 * @return a value for the key, or null if none
112 */
113 V map(K key);
114 }
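
    /*
     * Illustrative sketch (not part of the specification): a
     * MappingFunction is used with this class's computeIfAbsent
     * method, which invokes map(key) only if no value is currently
     * associated with the key. The cache name and the length
     * computation below are assumptions for the example.
     *
     *   ConcurrentHashMapV8<String, Integer> cache =
     *       new ConcurrentHashMapV8<String, Integer>();
     *   Integer len = cache.computeIfAbsent("hello",
     *       new MappingFunction<String, Integer>() {
     *           public Integer map(String key) { return key.length(); }
     *       });
     */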
115
116 /**
117 * A function computing a new mapping given a key and its current
118 * mapped value (or {@code null} if there is no current
119 * mapping). This is a place-holder for an upcoming JDK8
120 * interface.
121 */
122 public static interface RemappingFunction<K, V> {
123 /**
124 * Returns a new value given a key and its current value.
125 *
126 * @param key the (non-null) key
127 * @param value the current value, or null if there is no mapping
128 * @return a value for the key, or null if none
129 */
130 V remap(K key, V value);
131 }
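
    /*
     * Illustrative sketch (assumptions noted, not normative): a
     * RemappingFunction is used with this class's compute method; as
     * of this revision, a null return from remap removes the mapping.
     * The counter decrement below is hypothetical.
     *
     *   ConcurrentHashMapV8<String, Integer> counts =
     *       new ConcurrentHashMapV8<String, Integer>();
     *   counts.compute("key", new RemappingFunction<String, Integer>() {
     *       public Integer remap(String key, Integer value) {
     *           if (value == null) return 1;            // absent: start at 1
     *           return (value > 1) ? value - 1 : null;  // null removes the entry
     *       }
     *   });
     */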
132
133 /**
134 * A partitionable iterator. A Spliterator can be traversed
135 * directly, but can also be partitioned (before traversal) by
136 * creating another Spliterator that covers a non-overlapping
137 * portion of the elements, and so may be amenable to parallel
138 * execution.
139 *
140 * <p> This interface exports a subset of expected JDK8
141 * functionality.
142 *
143 * <p>Sample usage: Here is one (of the several) ways to compute
144 * the sum of the values held in a map using the ForkJoin
145 * framework. As illustrated here, Spliterators are well suited to
146 * designs in which a task repeatedly splits off half its work
147 * into forked subtasks until small enough to process directly,
148 * and then joins these subtasks. Variants of this style can
149 * also be used in completion-based designs.
150 *
151 * <pre>
152 * {@code ConcurrentHashMapV8<String, Long> m = ...
153 * // Uses parallel depth of log2 of size / (parallelism * slack of 8).
154 * int depth = 32 - Integer.numberOfLeadingZeros(m.size() / (aForkJoinPool.getParallelism() * 8));
155 * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), depth, null));
156 * // ...
157 * static class SumValues extends RecursiveTask<Long> {
158 * final Spliterator<Long> s;
159 * final int depth; // number of splits before processing
160 * final SumValues nextJoin; // records forked subtasks to join
161 * SumValues(Spliterator<Long> s, int depth, SumValues nextJoin) {
162 * this.s = s; this.depth = depth; this.nextJoin = nextJoin;
163 * }
164 * public Long compute() {
165 * long sum = 0;
166 * SumValues subtasks = null; // fork subtasks
167 * for (int d = depth - 1; d >= 0; --d)
168 * (subtasks = new SumValues(s.split(), d, subtasks)).fork();
169 * while (s.hasNext()) // directly process remaining elements
170 * sum += s.next();
171 * for (SumValues t = subtasks; t != null; t = t.nextJoin)
172 * sum += t.join(); // collect subtask results
173 * return sum;
174 * }
175 * }
176 * }</pre>
177 */
178 public static interface Spliterator<T> extends Iterator<T> {
179 /**
180 * Returns a Spliterator covering approximately half of the
181 * elements, guaranteed not to overlap with those subsequently
182 * returned by this Spliterator. After invoking this method,
183 * the current Spliterator will <em>not</em> produce any of
184 * the elements of the returned Spliterator, but the two
185 * Spliterators together will produce all of the elements that
186 * would have been produced by this Spliterator had this
187 * method not been called. The exact number of elements
188 * produced by the returned Spliterator is not guaranteed, and
189 * may be zero (i.e., with {@code hasNext()} reporting {@code
190 * false}) if this Spliterator cannot be further split.
191 *
192 * @return a Spliterator covering approximately half of the
193 * elements
194 * @throws IllegalStateException if this Spliterator has
195 * already commenced traversing elements.
196 */
197 Spliterator<T> split();
198
199 /**
200 * Returns a Spliterator producing the same elements as this
201 * Spliterator. This method may be used, for example, to create
202 * a second Spliterator before a traversal, in order to later
203 * perform a second traversal.
204 *
205 * @return a Spliterator covering the same range as this Spliterator.
206 * @throws IllegalStateException if this Spliterator has
207 * already commenced traversing elements.
208 */
209 Spliterator<T> clone();
210 }
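
    /*
     * A smaller, sequential sketch of Spliterator use (illustrative
     * only; valueSpliterator() is the method used in the ForkJoin
     * sample above, and the map m is assumed to exist):
     *
     *   Spliterator<Long> s1 = m.valueSpliterator();
     *   Spliterator<Long> s2 = s1.split();  // s1 and s2 now cover disjoint portions
     *   long sum = 0L;
     *   while (s1.hasNext()) sum += s1.next();
     *   while (s2.hasNext()) sum += s2.next();
     */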
211
212 /*
213 * Overview:
214 *
215 * The primary design goal of this hash table is to maintain
216 * concurrent readability (typically method get(), but also
217 * iterators and related methods) while minimizing update
218 * contention. Secondary goals are to keep space consumption about
219 * the same or better than java.util.HashMap, and to support high
220 * initial insertion rates on an empty table by many threads.
221 *
222 * Each key-value mapping is held in a Node. Because Node fields
223 * can contain special values, they are defined using plain Object
224 * types. Similarly in turn, all internal methods that use them
225 * work off Object types. And similarly, so do the internal
226 * methods of auxiliary iterator and view classes. All public
227 * generic typed methods relay in/out of these internal methods,
228 * supplying null-checks and casts as needed. This also allows
229 * many of the public methods to be factored into a smaller number
230 * of internal methods (although sadly not so for the five
231 * variants of put-related operations). The validation-based
232 * approach explained below leads to a lot of code sprawl because
233 * retry-control precludes factoring into smaller methods.
234 *
235 * The table is lazily initialized to a power-of-two size upon the
236 * first insertion. Each bin in the table normally contains a
237 * list of Nodes (most often, the list has only zero or one Node).
238 * Table accesses require volatile/atomic reads, writes, and
239 * CASes. Because there is no other way to arrange this without
240 * adding further indirections, we use intrinsics
241 * (sun.misc.Unsafe) operations. The lists of nodes within bins
242 * are always accurately traversable under volatile reads, so long
243 * as lookups check hash code and non-nullness of value before
244 * checking key equality.
245 *
246 * We use the top two bits of Node hash fields for control
247 * purposes -- they are available anyway because of addressing
248 * constraints. As explained further below, these top bits are
249 * used as follows:
250 * 00 - Normal
251 * 01 - Locked
252 * 11 - Locked and may have a thread waiting for lock
253 * 10 - Node is a forwarding node
254 *
255 * The lower 30 bits of each Node's hash field contain a
256 * transformation of the key's hash code, except for forwarding
257 * nodes, for which the lower bits are zero (and so always have
258 * hash field == MOVED).
259 *
260 * Insertion (via put or its variants) of the first node in an
261 * empty bin is performed by just CASing it to the bin. This is
262 * by far the most common case for put operations under most
263 * key/hash distributions. Other update operations (insert,
264 * delete, and replace) require locks. We do not want to waste
265 * the space required to associate a distinct lock object with
266 * each bin, so instead use the first node of a bin list itself as
267 * a lock. Blocking support for these locks relies on the builtin
268 * "synchronized" monitors. However, we also need a tryLock
269 * construction, so we overlay these by using bits of the Node
270 * hash field for lock control (see above), and so normally use
271 * builtin monitors only for blocking and signalling using
272 * wait/notifyAll constructions. See Node.tryAwaitLock.
273 *
274 * Using the first node of a list as a lock does not by itself
275 * suffice though: When a node is locked, any update must first
276 * validate that it is still the first node after locking it, and
277 * retry if not. Because new nodes are always appended to lists,
278 * once a node is first in a bin, it remains first until deleted
279 * or the bin becomes invalidated (upon resizing). However,
280 * operations that only conditionally update may inspect nodes
281 * until the point of update. This is a converse of sorts to the
282 * lazy locking technique described by Herlihy & Shavit.
283 *
284 * The main disadvantage of per-bin locks is that other update
285 * operations on other nodes in a bin list protected by the same
286 * lock can stall, for example when user equals() or mapping
287 * functions take a long time. However, statistically, under
288 * random hash codes, this is not a common problem. Ideally, the
289 * frequency of nodes in bins follows a Poisson distribution
290 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
291 * parameter of about 0.5 on average, given the resizing threshold
292 * of 0.75, although with a large variance because of resizing
293 * granularity. Ignoring variance, the expected occurrences of
294 * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
295 * first values are:
296 *
297 * 0: 0.60653066
298 * 1: 0.30326533
299 * 2: 0.07581633
300 * 3: 0.01263606
301 * 4: 0.00157952
302 * 5: 0.00015795
303 * 6: 0.00001316
304 * 7: 0.00000094
305 * 8: 0.00000006
306 * more: less than 1 in ten million
307 *
308 * Lock contention probability for two threads accessing distinct
309 * elements is roughly 1 / (8 * #elements) under random hashes.
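 *
 * As a sketch (illustrative only, derived from the formula above),
 * the listed frequencies can be reproduced with:
 *
 *   double p = 0.5;                 // Poisson parameter
 *   double term = Math.exp(-p);     // probability of list size 0
 *   for (int k = 0; k <= 8; ++k) {
 *       System.out.printf("%d: %.8f%n", k, term);
 *       term = term * p / (k + 1);  // exp(-p) * pow(p, k+1) / factorial(k+1)
 *   }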
310 *
311 * Actual hash code distributions encountered in practice
312 * sometimes deviate significantly from uniform randomness. This
313 * includes the case when N > (1<<30), so some keys MUST collide.
314 * Similarly for dumb or hostile usages in which multiple keys are
315 * designed to have identical hash codes. Also, although we guard
316 * against the worst effects of this (see method spread), sets of
317 * hashes may differ only in bits that do not impact their bin
318 * index for a given power-of-two mask. So we use a secondary
319 * strategy that applies when the number of nodes in a bin exceeds
320 * a threshold, and at least one of the keys implements
321 * Comparable. These TreeBins use a balanced tree to hold nodes
322 * (a specialized form of red-black trees), bounding search time
323 * to O(log N). Each search step in a TreeBin is around twice as
324 * slow as in a regular list, but given that N cannot exceed
325 * (1<<64) (before running out of addresses) this bounds search
326 * steps, lock hold times, etc, to reasonable constants (roughly
327 * 100 nodes inspected per operation worst case) so long as keys
328 * are Comparable (which is very common -- String, Long, etc).
329 * TreeBin nodes (TreeNodes) also maintain the same "next"
330 * traversal pointers as regular nodes, so can be traversed in
331 * iterators in the same way.
332 *
333 * The table is resized when occupancy exceeds a percentage
334 * threshold (nominally, 0.75, but see below). Only a single
335 * thread performs the resize (using field "sizeCtl", to arrange
336 * exclusion), but the table otherwise remains usable for reads
337 * and updates. Resizing proceeds by transferring bins, one by
338 * one, from the table to the next table. Because we are using
339 * power-of-two expansion, the elements from each bin must either
340 * stay at same index, or move with a power of two offset. We
341 * eliminate unnecessary node creation by catching cases where old
342 * nodes can be reused because their next fields won't change. On
343 * average, only about one-sixth of them need cloning when a table
344 * doubles. The nodes they replace will be garbage collectable as
345 * soon as they are no longer referenced by any reader thread that
346 * may be in the midst of concurrently traversing the table. Upon
347 * transfer, the old table bin contains only a special forwarding
348 * node (with hash field "MOVED") that contains the next table as
349 * its key. On encountering a forwarding node, access and update
350 * operations restart, using the new table.
351 *
352 * Each bin transfer requires its bin lock. However, unlike other
353 * cases, a transfer can skip a bin if it fails to acquire its
354 * lock, and revisit it later (unless it is a TreeBin). Method
355 * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that
356 * have been skipped because of failure to acquire a lock, and
357 * blocks only if none are available (i.e., only very rarely).
358 * The transfer operation must also ensure that all accessible
359 * bins in both the old and new table are usable by any traversal.
360 * When there are no lock acquisition failures, this is arranged
361 * simply by proceeding from the last bin (table.length - 1) up
362 * towards the first. Upon seeing a forwarding node, traversals
363 * (see class InternalIterator) arrange to move to the new table
364 * without revisiting nodes. However, when any node is skipped
365 * during a transfer, all earlier table bins may have become
366 * visible, so are initialized with a reverse-forwarding node back
367 * to the old table until the new ones are established. (This
368 * sometimes requires transiently locking a forwarding node, which
369 * is possible under the above encoding.) These more expensive
370 * mechanics trigger only when necessary.
371 *
372 * The traversal scheme also applies to partial traversals of
373 * ranges of bins (via an alternate InternalIterator constructor)
374 * to support partitioned aggregate operations. Also, read-only
375 * operations give up if ever forwarded to a null table, which
376 * provides support for shutdown-style clearing, which is also not
377 * currently implemented.
378 *
379 * Lazy table initialization minimizes footprint until first use,
380 * and also avoids resizings when the first operation is from a
381 * putAll, constructor with map argument, or deserialization.
382 * These cases attempt to override the initial capacity settings,
383 * but harmlessly fail to take effect in cases of races.
384 *
385 * The element count is maintained using a LongAdder, which avoids
386 * contention on updates but can encounter cache thrashing if read
387 * too frequently during concurrent access. To avoid reading so
388 * often, resizing is attempted either when a bin lock is
389 * contended, or upon adding to a bin already holding two or more
390 * nodes (checked before adding in the xIfAbsent methods, after
391 * adding in others). Under uniform hash distributions, the
392 * probability of this occurring at threshold is around 13%,
393 * meaning that only about 1 in 8 puts check threshold (and after
394 * resizing, many fewer do so). But this approximation has high
395 * variance for small table sizes, so we check on any collision
396 * for sizes <= 64. The bulk putAll operation further reduces
397 * contention by only committing count updates upon these size
398 * checks.
399 *
400 * Maintaining API and serialization compatibility with previous
401 * versions of this class introduces several oddities. Mainly: We
402 * leave untouched but unused constructor arguments referring to
403 * concurrencyLevel. We accept a loadFactor constructor argument,
404 * but apply it only to initial table capacity (which is the only
405 * time that we can guarantee to honor it.) We also declare an
406 * unused "Segment" class that is instantiated in minimal form
407 * only when serializing.
408 */
409
410 /* ---------------- Constants -------------- */
411
412 /**
413 * The largest possible table capacity. This value must be
414 * exactly 1<<30 to stay within Java array allocation and indexing
415 * bounds for power of two table sizes, and is further required
416 * because the top two bits of 32bit hash fields are used for
417 * control purposes.
418 */
419 private static final int MAXIMUM_CAPACITY = 1 << 30;
420
421 /**
422 * The default initial table capacity. Must be a power of 2
423 * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
424 */
425 private static final int DEFAULT_CAPACITY = 16;
426
427 /**
428 * The largest possible (non-power of two) array size.
429 * Needed by toArray and related methods.
430 */
431 static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
432
433 /**
434 * The default concurrency level for this table. Unused but
435 * defined for compatibility with previous versions of this class.
436 */
437 private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
438
439 /**
440 * The load factor for this table. Overrides of this value in
441 * constructors affect only the initial table capacity. The
442 * actual floating point value isn't normally used -- it is
443 * simpler to use expressions such as {@code n - (n >>> 2)} for
444 * the associated resizing threshold.
445 */
446 private static final float LOAD_FACTOR = 0.75f;
447
448 /**
449 * The buffer size for skipped bins during transfers. The
450 * value is arbitrary but should be large enough to avoid
451 * most locking stalls during resizes.
452 */
453 private static final int TRANSFER_BUFFER_SIZE = 32;
454
455 /**
456 * The bin count threshold for using a tree rather than list for a
457 * bin. The value reflects the approximate break-even point for
458 * using tree-based operations.
459 */
460 private static final int TREE_THRESHOLD = 8;
461
462 /*
463 * Encodings for special uses of Node hash fields. See above for
464 * explanation.
465 */
466 static final int MOVED = 0x80000000; // hash field for forwarding nodes
467 static final int LOCKED = 0x40000000; // set/tested only as a bit
468 static final int WAITING = 0xc0000000; // both bits set/tested together
469 static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash
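
    // Explanatory note (not additional specification): for a normal node hash h,
    //   (h & HASH_BITS) == h    // top two bits are 00
    //   (h | LOCKED)            // the locked encoding CASed in by updaters
    //   (h | WAITING)           // locked with a waiter; both top bits set
    // while forwarding nodes have hash == MOVED, whose low 30 bits are zero.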
470
471 /* ---------------- Fields -------------- */
472
473 /**
474 * The array of bins. Lazily initialized upon first insertion.
475 * Size is always a power of two. Accessed directly by iterators.
476 */
477 transient volatile Node[] table;
478
479 /**
480 * The counter maintaining number of elements.
481 */
482 private transient final LongAdder counter;
483
484 /**
485 * Table initialization and resizing control. When negative, the
486 * table is being initialized or resized. Otherwise, when table is
487 * null, holds the initial table size to use upon creation, or 0
488 * for default. After initialization, holds the next element count
489 * value upon which to resize the table.
490 */
491 private transient volatile int sizeCtl;
492
493 // views
494 private transient KeySet<K,V> keySet;
495 private transient Values<K,V> values;
496 private transient EntrySet<K,V> entrySet;
497
498 /** For serialization compatibility. Null unless serialized; see below */
499 private Segment<K,V>[] segments;
500
501 /* ---------------- Table element access -------------- */
502
503 /*
504 * Volatile access methods are used for table elements as well as
505 * elements of in-progress next table while resizing. Uses are
506 * null checked by callers, and implicitly bounds-checked, relying
507 * on the invariants that tab arrays have non-zero size, and all
508 * indices are masked with (tab.length - 1) which is never
509 * negative and always less than length. Note that, to be correct
510 * wrt arbitrary concurrency errors by users, bounds checks must
511 * operate on local variables, which accounts for some odd-looking
512 * inline assignments below.
513 */
514
515 static final Node tabAt(Node[] tab, int i) { // used by InternalIterator
516 return (Node)UNSAFE.getObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE);
517 }
518
519 private static final boolean casTabAt(Node[] tab, int i, Node c, Node v) {
520 return UNSAFE.compareAndSwapObject(tab, ((long)i<<ASHIFT)+ABASE, c, v);
521 }
522
523 private static final void setTabAt(Node[] tab, int i, Node v) {
524 UNSAFE.putObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE, v);
525 }
526
527 /* ---------------- Nodes -------------- */
528
529 /**
530 * Key-value entry. Note that this is never exported out as a
531 * user-visible Map.Entry (see MapEntry below). Nodes with a hash
532 * field of MOVED are special, and do not contain user keys or
533 * values. Otherwise, keys are never null, and null val fields
534 * indicate that a node is in the process of being deleted or
535 * created. For purposes of read-only access, a key may be read
536 * before a val, but can only be used after checking val to be
537 * non-null.
538 */
539 static class Node {
540 volatile int hash;
541 final Object key;
542 volatile Object val;
543 volatile Node next;
544
545 Node(int hash, Object key, Object val, Node next) {
546 this.hash = hash;
547 this.key = key;
548 this.val = val;
549 this.next = next;
550 }
551
552 /** CompareAndSet the hash field */
553 final boolean casHash(int cmp, int val) {
554 return UNSAFE.compareAndSwapInt(this, hashOffset, cmp, val);
555 }
556
557 /** The number of spins before blocking for a lock */
558 static final int MAX_SPINS =
559 Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
560
561 /**
562 * Spins a while if LOCKED bit set and this node is the first
563 * of its bin, and then sets WAITING bits on hash field and
564 * blocks (once) if they are still set. It is OK for this
565 * method to return even if lock is not available upon exit,
566 * which enables these simple single-wait mechanics.
567 *
568 * The corresponding signalling operation is performed within
569 * callers: Upon detecting that WAITING has been set when
570 * unlocking lock (via a failed CAS from non-waiting LOCKED
571 * state), unlockers acquire the sync lock and perform a
572 * notifyAll.
573 */
574 final void tryAwaitLock(Node[] tab, int i) {
575 if (tab != null && i >= 0 && i < tab.length) { // bounds check
576 int r = ThreadLocalRandom.current().nextInt(); // randomize spins
577 int spins = MAX_SPINS, h;
578 while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) {
579 if (spins >= 0) {
580 r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
581 if (r >= 0 && --spins == 0)
582 Thread.yield(); // yield before block
583 }
584 else if (casHash(h, h | WAITING)) {
585 synchronized (this) {
586 if (tabAt(tab, i) == this &&
587 (hash & WAITING) == WAITING) {
588 try {
589 wait();
590 } catch (InterruptedException ie) {
591 Thread.currentThread().interrupt();
592 }
593 }
594 else
595 notifyAll(); // possibly won race vs signaller
596 }
597 break;
598 }
599 }
600 }
601 }
602
603 // Unsafe mechanics for casHash
604 private static final sun.misc.Unsafe UNSAFE;
605 private static final long hashOffset;
606
607 static {
608 try {
609 UNSAFE = getUnsafe();
610 Class<?> k = Node.class;
611 hashOffset = UNSAFE.objectFieldOffset
612 (k.getDeclaredField("hash"));
613 } catch (Exception e) {
614 throw new Error(e);
615 }
616 }
617 }
618
619 /* ---------------- TreeBins -------------- */
620
621 /**
622 * Nodes for use in TreeBins
623 */
624 static final class TreeNode extends Node {
625 TreeNode parent; // red-black tree links
626 TreeNode left;
627 TreeNode right;
628 TreeNode prev; // needed to unlink next upon deletion
629 boolean red;
630
631 TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) {
632 super(hash, key, val, next);
633 this.parent = parent;
634 }
635 }
636
637 /**
638 * A specialized form of red-black tree for use in bins
639 * whose size exceeds a threshold.
640 *
641 * TreeBins use a special form of comparison for search and
642 * related operations (which is the main reason we cannot use
643 * existing collections such as TreeMaps). TreeBins contain
644 * Comparable elements, but may contain others, as well as
645 * elements that are Comparable but not necessarily Comparable<T>
646 * for the same T, so we cannot invoke compareTo among them. To
647 * handle this, the tree is ordered primarily by hash value, then
648 * by getClass().getName() order, and then by Comparator order
649 * among elements of the same class. On lookup at a node, if
650 * elements are not comparable or compare as 0, both left and
651 * right children may need to be searched in the case of tied hash
652 * values. (This corresponds to the full list search that would be
653 * necessary if all elements were non-Comparable and had tied
654 * hashes.) The red-black balancing code is updated from
655 * pre-jdk-collections
656 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
657 * based in turn on Cormen, Leiserson, and Rivest "Introduction to
658 * Algorithms" (CLR).
659 *
660 * TreeBins also maintain a locking discipline separate from that of
661 * regular bins. Because they are forwarded via special MOVED
662 * nodes at bin heads (which can never change once established),
663 * we cannot use those nodes as locks. Instead, TreeBin
664 * extends AbstractQueuedSynchronizer to support a simple form of
665 * read-write lock. For update operations and table validation,
666 * the exclusive form of lock behaves in the same way as bin-head
667 * locks. However, lookups use shared read-lock mechanics to allow
668 * multiple readers in the absence of writers. Additionally,
669 * these lookups do not ever block: While the lock is not
670 * available, they proceed along the slow traversal path (via
671 * next-pointers) until the lock becomes available or the list is
672 * exhausted, whichever comes first. (These cases are not fast,
673 * but maximize aggregate expected throughput.) The AQS mechanics
674 * for doing this are straightforward. The lock state is held as
675 * AQS getState(). Read counts are negative; the write count (1)
676 * is positive. There are no signalling preferences among readers
677 * and writers. Since we don't need to export full Lock API, we
678 * just override the minimal AQS methods and use them directly.
679 */
680 static final class TreeBin extends AbstractQueuedSynchronizer {
681 private static final long serialVersionUID = 2249069246763182397L;
682 transient TreeNode root; // root of tree
683 transient TreeNode first; // head of next-pointer list
684
685 /* AQS overrides */
686 public final boolean isHeldExclusively() { return getState() > 0; }
687 public final boolean tryAcquire(int ignore) {
688 if (compareAndSetState(0, 1)) {
689 setExclusiveOwnerThread(Thread.currentThread());
690 return true;
691 }
692 return false;
693 }
694 public final boolean tryRelease(int ignore) {
695 setExclusiveOwnerThread(null);
696 setState(0);
697 return true;
698 }
699 public final int tryAcquireShared(int ignore) {
700 for (int c;;) {
701 if ((c = getState()) > 0)
702 return -1;
703 if (compareAndSetState(c, c - 1))
704 return 1;
705 }
706 }
707 public final boolean tryReleaseShared(int ignore) {
708 int c;
709 do {} while (!compareAndSetState(c = getState(), c + 1));
710 return c == -1;
711 }
712
713 /** From CLR */
714 private void rotateLeft(TreeNode p) {
715 if (p != null) {
716 TreeNode r = p.right, pp, rl;
717 if ((rl = p.right = r.left) != null)
718 rl.parent = p;
719 if ((pp = r.parent = p.parent) == null)
720 root = r;
721 else if (pp.left == p)
722 pp.left = r;
723 else
724 pp.right = r;
725 r.left = p;
726 p.parent = r;
727 }
728 }
729
730 /** From CLR */
731 private void rotateRight(TreeNode p) {
732 if (p != null) {
733 TreeNode l = p.left, pp, lr;
734 if ((lr = p.left = l.right) != null)
735 lr.parent = p;
736 if ((pp = l.parent = p.parent) == null)
737 root = l;
738 else if (pp.right == p)
739 pp.right = l;
740 else
741 pp.left = l;
742 l.right = p;
743 p.parent = l;
744 }
745 }
746
747 /**
748 * Returns the TreeNode (or null if not found) for the given key
749 * starting at the given root.
750 */
751 @SuppressWarnings("unchecked") // suppress Comparable cast warning
752 final TreeNode getTreeNode(int h, Object k, TreeNode p) {
753 Class<?> c = k.getClass();
754 while (p != null) {
755 int dir, ph; Object pk; Class<?> pc;
756 if ((ph = p.hash) == h) {
757 if ((pk = p.key) == k || k.equals(pk))
758 return p;
759 if (c != (pc = pk.getClass()) ||
760 !(k instanceof Comparable) ||
761 (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
762 dir = (c == pc)? 0 : c.getName().compareTo(pc.getName());
763 TreeNode r = null, s = null, pl, pr;
764 if (dir >= 0) {
765 if ((pl = p.left) != null && h <= pl.hash)
766 s = pl;
767 }
768 else if ((pr = p.right) != null && h >= pr.hash)
769 s = pr;
770 if (s != null && (r = getTreeNode(h, k, s)) != null)
771 return r;
772 }
773 }
774 else
775 dir = (h < ph) ? -1 : 1;
776 p = (dir > 0) ? p.right : p.left;
777 }
778 return null;
779 }
780
781 /**
782 * Wrapper for getTreeNode used by CHM.get. Tries to obtain
783 * read-lock to call getTreeNode, but during failure to get
784 * the lock, searches along next links.
785 */
786 final Object getValue(int h, Object k) {
787 Node r = null;
788 int c = getState(); // Must read lock state first
789 for (Node e = first; e != null; e = e.next) {
790 if (c <= 0 && compareAndSetState(c, c - 1)) {
791 try {
792 r = getTreeNode(h, k, root);
793 } finally {
794 releaseShared(0);
795 }
796 break;
797 }
798 else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) {
799 r = e;
800 break;
801 }
802 else
803 c = getState();
804 }
805 return r == null ? null : r.val;
806 }
807
808 /**
809 * Finds or adds a node.
810 * @return null if added
811 */
812 @SuppressWarnings("unchecked") // suppress Comparable cast warning
813 final TreeNode putTreeNode(int h, Object k, Object v) {
814 Class<?> c = k.getClass();
815 TreeNode pp = root, p = null;
816 int dir = 0;
817 while (pp != null) { // find existing node or leaf to insert at
818 int ph; Object pk; Class<?> pc;
819 p = pp;
820 if ((ph = p.hash) == h) {
821 if ((pk = p.key) == k || k.equals(pk))
822 return p;
823 if (c != (pc = pk.getClass()) ||
824 !(k instanceof Comparable) ||
825 (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
826 dir = (c == pc)? 0 : c.getName().compareTo(pc.getName());
827 TreeNode r = null, s = null, pl, pr;
828 if (dir >= 0) {
829 if ((pl = p.left) != null && h <= pl.hash)
830 s = pl;
831 }
832 else if ((pr = p.right) != null && h >= pr.hash)
833 s = pr;
834 if (s != null && (r = getTreeNode(h, k, s)) != null)
835 return r;
836 }
837 }
838 else
839 dir = (h < ph) ? -1 : 1;
840 pp = (dir > 0) ? p.right : p.left;
841 }
842
843 TreeNode f = first;
844 TreeNode x = first = new TreeNode(h, k, v, f, p);
845 if (p == null)
846 root = x;
847 else { // attach and rebalance; adapted from CLR
848 TreeNode xp, xpp;
849 if (f != null)
850 f.prev = x;
851 if (dir <= 0)
852 p.left = x;
853 else
854 p.right = x;
855 x.red = true;
856 while (x != null && (xp = x.parent) != null && xp.red &&
857 (xpp = xp.parent) != null) {
858 TreeNode xppl = xpp.left;
859 if (xp == xppl) {
860 TreeNode y = xpp.right;
861 if (y != null && y.red) {
862 y.red = false;
863 xp.red = false;
864 xpp.red = true;
865 x = xpp;
866 }
867 else {
868 if (x == xp.right) {
869 rotateLeft(x = xp);
870 xpp = (xp = x.parent) == null ? null : xp.parent;
871 }
872 if (xp != null) {
873 xp.red = false;
874 if (xpp != null) {
875 xpp.red = true;
876 rotateRight(xpp);
877 }
878 }
879 }
880 }
881 else {
882 TreeNode y = xppl;
883 if (y != null && y.red) {
884 y.red = false;
885 xp.red = false;
886 xpp.red = true;
887 x = xpp;
888 }
889 else {
890 if (x == xp.left) {
891 rotateRight(x = xp);
892 xpp = (xp = x.parent) == null ? null : xp.parent;
893 }
894 if (xp != null) {
895 xp.red = false;
896 if (xpp != null) {
897 xpp.red = true;
898 rotateLeft(xpp);
899 }
900 }
901 }
902 }
903 }
904 TreeNode r = root;
905 if (r != null && r.red)
906 r.red = false;
907 }
908 return null;
909 }
910
911 /**
912 * Removes the given node, which must be present before this
913 * call. This is messier than typical red-black deletion code
914 * because we cannot swap the contents of an interior node
915 * with a leaf successor that is pinned by "next" pointers
916 * that are accessible independently of lock. So instead we
917 * swap the tree linkages.
918 */
919 final void deleteTreeNode(TreeNode p) {
920 TreeNode next = (TreeNode)p.next; // unlink traversal pointers
921 TreeNode pred = p.prev;
922 if (pred == null)
923 first = next;
924 else
925 pred.next = next;
926 if (next != null)
927 next.prev = pred;
928 TreeNode replacement;
929 TreeNode pl = p.left;
930 TreeNode pr = p.right;
931 if (pl != null && pr != null) {
932 TreeNode s = pr, sl;
933 while ((sl = s.left) != null) // find successor
934 s = sl;
935 boolean c = s.red; s.red = p.red; p.red = c; // swap colors
936 TreeNode sr = s.right;
937 TreeNode pp = p.parent;
938 if (s == pr) { // p was s's direct parent
939 p.parent = s;
940 s.right = p;
941 }
942 else {
943 TreeNode sp = s.parent;
944 if ((p.parent = sp) != null) {
945 if (s == sp.left)
946 sp.left = p;
947 else
948 sp.right = p;
949 }
950 if ((s.right = pr) != null)
951 pr.parent = s;
952 }
953 p.left = null;
954 if ((p.right = sr) != null)
955 sr.parent = p;
956 if ((s.left = pl) != null)
957 pl.parent = s;
958 if ((s.parent = pp) == null)
959 root = s;
960 else if (p == pp.left)
961 pp.left = s;
962 else
963 pp.right = s;
964 replacement = sr;
965 }
966 else
967 replacement = (pl != null) ? pl : pr;
968 TreeNode pp = p.parent;
969 if (replacement == null) {
970 if (pp == null) {
971 root = null;
972 return;
973 }
974 replacement = p;
975 }
976 else {
977 replacement.parent = pp;
978 if (pp == null)
979 root = replacement;
980 else if (p == pp.left)
981 pp.left = replacement;
982 else
983 pp.right = replacement;
984 p.left = p.right = p.parent = null;
985 }
986 if (!p.red) { // rebalance, from CLR
987 TreeNode x = replacement;
988 while (x != null) {
989 TreeNode xp, xpl;
990 if (x.red || (xp = x.parent) == null) {
991 x.red = false;
992 break;
993 }
994 if (x == (xpl = xp.left)) {
995 TreeNode sib = xp.right;
996 if (sib != null && sib.red) {
997 sib.red = false;
998 xp.red = true;
999 rotateLeft(xp);
1000 sib = (xp = x.parent) == null ? null : xp.right;
1001 }
1002 if (sib == null)
1003 x = xp;
1004 else {
1005 TreeNode sl = sib.left, sr = sib.right;
1006 if ((sr == null || !sr.red) &&
1007 (sl == null || !sl.red)) {
1008 sib.red = true;
1009 x = xp;
1010 }
1011 else {
1012 if (sr == null || !sr.red) {
1013 if (sl != null)
1014 sl.red = false;
1015 sib.red = true;
1016 rotateRight(sib);
1017 sib = (xp = x.parent) == null ? null : xp.right;
1018 }
1019 if (sib != null) {
1020 sib.red = (xp == null)? false : xp.red;
1021 if ((sr = sib.right) != null)
1022 sr.red = false;
1023 }
1024 if (xp != null) {
1025 xp.red = false;
1026 rotateLeft(xp);
1027 }
1028 x = root;
1029 }
1030 }
1031 }
1032 else { // symmetric
1033 TreeNode sib = xpl;
1034 if (sib != null && sib.red) {
1035 sib.red = false;
1036 xp.red = true;
1037 rotateRight(xp);
1038 sib = (xp = x.parent) == null ? null : xp.left;
1039 }
1040 if (sib == null)
1041 x = xp;
1042 else {
1043 TreeNode sl = sib.left, sr = sib.right;
1044 if ((sl == null || !sl.red) &&
1045 (sr == null || !sr.red)) {
1046 sib.red = true;
1047 x = xp;
1048 }
1049 else {
1050 if (sl == null || !sl.red) {
1051 if (sr != null)
1052 sr.red = false;
1053 sib.red = true;
1054 rotateLeft(sib);
1055 sib = (xp = x.parent) == null ? null : xp.left;
1056 }
1057 if (sib != null) {
1058 sib.red = (xp == null)? false : xp.red;
1059 if ((sl = sib.left) != null)
1060 sl.red = false;
1061 }
1062 if (xp != null) {
1063 xp.red = false;
1064 rotateRight(xp);
1065 }
1066 x = root;
1067 }
1068 }
1069 }
1070 }
1071 }
1072 if (p == replacement && (pp = p.parent) != null) {
1073 if (p == pp.left) // detach pointers
1074 pp.left = null;
1075 else if (p == pp.right)
1076 pp.right = null;
1077 p.parent = null;
1078 }
1079 }
1080 }
1081
1082 /* ---------------- Collision reduction methods -------------- */
1083
1084 /**
1085 * Spreads higher bits to lower, and also forces top 2 bits to 0.
1086 * Because the table uses power-of-two masking, sets of hashes
1087 * that vary only in bits above the current mask will always
1088 * collide. (Among known examples are sets of Float keys holding
1089 * consecutive whole numbers in small tables.) To counter this,
1090 * we apply a transform that spreads the impact of higher bits
1091 * downward. There is a tradeoff between speed, utility, and
1092 * quality of bit-spreading. Because many common sets of hashes
1093 * are already reasonably distributed across bits (so don't benefit
1094 * from spreading), and because we use trees to handle large sets
1095 * of collisions in bins, we don't need excessively high quality.
1096 */
1097 private static final int spread(int h) {
1098 h ^= (h >>> 18) ^ (h >>> 12);
1099 return (h ^ (h >>> 10)) & HASH_BITS;
1100 }
1101
1102 /**
1103 * Replaces a list bin with a tree bin. Call only when locked.
1104 * Fails to replace if the given key is non-comparable or the table
1105 * is, or needs, resizing.
1106 */
1107 private final void replaceWithTreeBin(Node[] tab, int index, Object key) {
1108 if ((key instanceof Comparable) &&
1109 (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) {
1110 TreeBin t = new TreeBin();
1111 for (Node e = tabAt(tab, index); e != null; e = e.next)
1112 t.putTreeNode(e.hash & HASH_BITS, e.key, e.val);
1113 setTabAt(tab, index, new Node(MOVED, t, null, null));
1114 }
1115 }
1116
1117 /* ---------------- Internal access and update methods -------------- */
1118
1119 /** Implementation for get and containsKey */
1120 private final Object internalGet(Object k) {
1121 int h = spread(k.hashCode());
1122 retry: for (Node[] tab = table; tab != null;) {
1123 Node e, p; Object ek, ev; int eh; // locals to read fields once
1124 for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) {
1125 if ((eh = e.hash) == MOVED) {
1126 if ((ek = e.key) instanceof TreeBin) // search TreeBin
1127 return ((TreeBin)ek).getValue(h, k);
1128 else { // restart with new table
1129 tab = (Node[])ek;
1130 continue retry;
1131 }
1132 }
1133 else if ((eh & HASH_BITS) == h && (ev = e.val) != null &&
1134 ((ek = e.key) == k || k.equals(ek)))
1135 return ev;
1136 }
1137 break;
1138 }
1139 return null;
1140 }
1141
1142 /**
1143 * Implementation for the four public remove/replace methods:
1144 * Replaces node value with v, conditional upon match of cv if
1145 * non-null. If resulting value is null, delete.
1146 */
1147 private final Object internalReplace(Object k, Object v, Object cv) {
1148 int h = spread(k.hashCode());
1149 Object oldVal = null;
1150 for (Node[] tab = table;;) {
1151 Node f; int i, fh; Object fk;
1152 if (tab == null ||
1153 (f = tabAt(tab, i = (tab.length - 1) & h)) == null)
1154 break;
1155 else if ((fh = f.hash) == MOVED) {
1156 if ((fk = f.key) instanceof TreeBin) {
1157 TreeBin t = (TreeBin)fk;
1158 boolean validated = false;
1159 boolean deleted = false;
1160 t.acquire(0);
1161 try {
1162 if (tabAt(tab, i) == f) {
1163 validated = true;
1164 TreeNode p = t.getTreeNode(h, k, t.root);
1165 if (p != null) {
1166 Object pv = p.val;
1167 if (cv == null || cv == pv || cv.equals(pv)) {
1168 oldVal = pv;
1169 if ((p.val = v) == null) {
1170 deleted = true;
1171 t.deleteTreeNode(p);
1172 }
1173 }
1174 }
1175 }
1176 } finally {
1177 t.release(0);
1178 }
1179 if (validated) {
1180 if (deleted)
1181 counter.add(-1L);
1182 break;
1183 }
1184 }
1185 else
1186 tab = (Node[])fk;
1187 }
1188 else if ((fh & HASH_BITS) != h && f.next == null) // precheck
1189 break; // rules out possible existence
1190 else if ((fh & LOCKED) != 0) {
1191 checkForResize(); // try resizing if can't get lock
1192 f.tryAwaitLock(tab, i);
1193 }
1194 else if (f.casHash(fh, fh | LOCKED)) {
1195 boolean validated = false;
1196 boolean deleted = false;
1197 try {
1198 if (tabAt(tab, i) == f) {
1199 validated = true;
1200 for (Node e = f, pred = null;;) {
1201 Object ek, ev;
1202 if ((e.hash & HASH_BITS) == h &&
1203 ((ev = e.val) != null) &&
1204 ((ek = e.key) == k || k.equals(ek))) {
1205 if (cv == null || cv == ev || cv.equals(ev)) {
1206 oldVal = ev;
1207 if ((e.val = v) == null) {
1208 deleted = true;
1209 Node en = e.next;
1210 if (pred != null)
1211 pred.next = en;
1212 else
1213 setTabAt(tab, i, en);
1214 }
1215 }
1216 break;
1217 }
1218 pred = e;
1219 if ((e = e.next) == null)
1220 break;
1221 }
1222 }
1223 } finally {
1224 if (!f.casHash(fh | LOCKED, fh)) {
1225 f.hash = fh;
1226 synchronized (f) { f.notifyAll(); };
1227 }
1228 }
1229 if (validated) {
1230 if (deleted)
1231 counter.add(-1L);
1232 break;
1233 }
1234 }
1235 }
1236 return oldVal;
1237 }
1238
1239 /*
1240 * Internal versions of the five insertion methods, each a
1241 * little more complicated than the last. All have
1242 * the same basic structure as the first (internalPut):
1243 * 1. If table uninitialized, create
1244 * 2. If bin empty, try to CAS new node
1245 * 3. If bin stale, use new table
1246 * 4. if bin converted to TreeBin, validate and relay to TreeBin methods
1247 * 5. Lock and validate; if valid, scan and add or update
1248 *
1249 * The others interweave other checks and/or alternative actions:
1250 * * Plain put checks for and performs resize after insertion.
1251 * * putIfAbsent prescans for mapping without lock (and fails to add
1252 * if present), which also makes pre-emptive resize checks worthwhile.
1253 * * computeIfAbsent extends the form used in putIfAbsent with additional
1254 * mechanics to deal with calls, potential exceptions, and null
1255 * returns from the function call.
1256 * * compute uses the same function-call mechanics, but without
1257 * the prescans.
1258 * * putAll attempts to pre-allocate enough table space
1259 * and more lazily performs count updates and checks.
1260 *
1261 * Someday when details settle down a bit more, it might be worth
1262 * some factoring to reduce sprawl.
1263 */
1264
1265 /** Implementation for put */
1266 private final Object internalPut(Object k, Object v) {
1267 int h = spread(k.hashCode());
1268 int count = 0;
1269 for (Node[] tab = table;;) {
1270 int i; Node f; int fh; Object fk;
1271 if (tab == null)
1272 tab = initTable();
1273 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1274 if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1275 break; // no lock when adding to empty bin
1276 }
1277 else if ((fh = f.hash) == MOVED) {
1278 if ((fk = f.key) instanceof TreeBin) {
1279 TreeBin t = (TreeBin)fk;
1280 Object oldVal = null;
1281 t.acquire(0);
1282 try {
1283 if (tabAt(tab, i) == f) {
1284 count = 2;
1285 TreeNode p = t.putTreeNode(h, k, v);
1286 if (p != null) {
1287 oldVal = p.val;
1288 p.val = v;
1289 }
1290 }
1291 } finally {
1292 t.release(0);
1293 }
1294 if (count != 0) {
1295 if (oldVal != null)
1296 return oldVal;
1297 break;
1298 }
1299 }
1300 else
1301 tab = (Node[])fk;
1302 }
1303 else if ((fh & LOCKED) != 0) {
1304 checkForResize();
1305 f.tryAwaitLock(tab, i);
1306 }
1307 else if (f.casHash(fh, fh | LOCKED)) {
1308 Object oldVal = null;
1309 try { // needed in case equals() throws
1310 if (tabAt(tab, i) == f) {
1311 count = 1;
1312 for (Node e = f;; ++count) {
1313 Object ek, ev;
1314 if ((e.hash & HASH_BITS) == h &&
1315 (ev = e.val) != null &&
1316 ((ek = e.key) == k || k.equals(ek))) {
1317 oldVal = ev;
1318 e.val = v;
1319 break;
1320 }
1321 Node last = e;
1322 if ((e = e.next) == null) {
1323 last.next = new Node(h, k, v, null);
1324 if (count >= TREE_THRESHOLD)
1325 replaceWithTreeBin(tab, i, k);
1326 break;
1327 }
1328 }
1329 }
1330 } finally { // unlock and signal if needed
1331 if (!f.casHash(fh | LOCKED, fh)) {
1332 f.hash = fh;
1333 synchronized (f) { f.notifyAll(); };
1334 }
1335 }
1336 if (count != 0) {
1337 if (oldVal != null)
1338 return oldVal;
1339 if (tab.length <= 64)
1340 count = 2;
1341 break;
1342 }
1343 }
1344 }
1345 counter.add(1L);
1346 if (count > 1)
1347 checkForResize();
1348 return null;
1349 }
1350
1351 /** Implementation for putIfAbsent */
1352 private final Object internalPutIfAbsent(Object k, Object v) {
1353 int h = spread(k.hashCode());
1354 int count = 0;
1355 for (Node[] tab = table;;) {
1356 int i; Node f; int fh; Object fk, fv;
1357 if (tab == null)
1358 tab = initTable();
1359 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1360 if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1361 break;
1362 }
1363 else if ((fh = f.hash) == MOVED) {
1364 if ((fk = f.key) instanceof TreeBin) {
1365 TreeBin t = (TreeBin)fk;
1366 Object oldVal = null;
1367 t.acquire(0);
1368 try {
1369 if (tabAt(tab, i) == f) {
1370 count = 2;
1371 TreeNode p = t.putTreeNode(h, k, v);
1372 if (p != null)
1373 oldVal = p.val;
1374 }
1375 } finally {
1376 t.release(0);
1377 }
1378 if (count != 0) {
1379 if (oldVal != null)
1380 return oldVal;
1381 break;
1382 }
1383 }
1384 else
1385 tab = (Node[])fk;
1386 }
1387 else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1388 ((fk = f.key) == k || k.equals(fk)))
1389 return fv;
1390 else {
1391 Node g = f.next;
1392 if (g != null) { // at least 2 nodes -- search and maybe resize
1393 for (Node e = g;;) {
1394 Object ek, ev;
1395 if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1396 ((ek = e.key) == k || k.equals(ek)))
1397 return ev;
1398 if ((e = e.next) == null) {
1399 checkForResize();
1400 break;
1401 }
1402 }
1403 }
1404 if (((fh = f.hash) & LOCKED) != 0) {
1405 checkForResize();
1406 f.tryAwaitLock(tab, i);
1407 }
1408 else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1409 Object oldVal = null;
1410 try {
1411 if (tabAt(tab, i) == f) {
1412 count = 1;
1413 for (Node e = f;; ++count) {
1414 Object ek, ev;
1415 if ((e.hash & HASH_BITS) == h &&
1416 (ev = e.val) != null &&
1417 ((ek = e.key) == k || k.equals(ek))) {
1418 oldVal = ev;
1419 break;
1420 }
1421 Node last = e;
1422 if ((e = e.next) == null) {
1423 last.next = new Node(h, k, v, null);
1424 if (count >= TREE_THRESHOLD)
1425 replaceWithTreeBin(tab, i, k);
1426 break;
1427 }
1428 }
1429 }
1430 } finally {
1431 if (!f.casHash(fh | LOCKED, fh)) {
1432 f.hash = fh;
1433 synchronized (f) { f.notifyAll(); };
1434 }
1435 }
1436 if (count != 0) {
1437 if (oldVal != null)
1438 return oldVal;
1439 if (tab.length <= 64)
1440 count = 2;
1441 break;
1442 }
1443 }
1444 }
1445 }
1446 counter.add(1L);
1447 if (count > 1)
1448 checkForResize();
1449 return null;
1450 }
1451
1452 /** Implementation for computeIfAbsent */
1453 private final Object internalComputeIfAbsent(K k,
1454 MappingFunction<? super K, ?> mf) {
1455 int h = spread(k.hashCode());
1456 Object val = null;
1457 int count = 0;
1458 for (Node[] tab = table;;) {
1459 Node f; int i, fh; Object fk, fv;
1460 if (tab == null)
1461 tab = initTable();
1462 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1463 Node node = new Node(fh = h | LOCKED, k, null, null);
1464 if (casTabAt(tab, i, null, node)) {
1465 count = 1;
1466 try {
1467 if ((val = mf.map(k)) != null)
1468 node.val = val;
1469 } finally {
1470 if (val == null)
1471 setTabAt(tab, i, null);
1472 if (!node.casHash(fh, h)) {
1473 node.hash = h;
1474 synchronized (node) { node.notifyAll(); };
1475 }
1476 }
1477 }
1478 if (count != 0)
1479 break;
1480 }
1481 else if ((fh = f.hash) == MOVED) {
1482 if ((fk = f.key) instanceof TreeBin) {
1483 TreeBin t = (TreeBin)fk;
1484 boolean added = false;
1485 t.acquire(0);
1486 try {
1487 if (tabAt(tab, i) == f) {
1488 count = 1;
1489 TreeNode p = t.getTreeNode(h, k, t.root);
1490 if (p != null)
1491 val = p.val;
1492 else if ((val = mf.map(k)) != null) {
1493 added = true;
1494 count = 2;
1495 t.putTreeNode(h, k, val);
1496 }
1497 }
1498 } finally {
1499 t.release(0);
1500 }
1501 if (count != 0) {
1502 if (!added)
1503 return val;
1504 break;
1505 }
1506 }
1507 else
1508 tab = (Node[])fk;
1509 }
1510 else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1511 ((fk = f.key) == k || k.equals(fk)))
1512 return fv;
1513 else {
1514 Node g = f.next;
1515 if (g != null) {
1516 for (Node e = g;;) {
1517 Object ek, ev;
1518 if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1519 ((ek = e.key) == k || k.equals(ek)))
1520 return ev;
1521 if ((e = e.next) == null) {
1522 checkForResize();
1523 break;
1524 }
1525 }
1526 }
1527 if (((fh = f.hash) & LOCKED) != 0) {
1528 checkForResize();
1529 f.tryAwaitLock(tab, i);
1530 }
1531 else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1532 boolean added = false;
1533 try {
1534 if (tabAt(tab, i) == f) {
1535 count = 1;
1536 for (Node e = f;; ++count) {
1537 Object ek, ev;
1538 if ((e.hash & HASH_BITS) == h &&
1539 (ev = e.val) != null &&
1540 ((ek = e.key) == k || k.equals(ek))) {
1541 val = ev;
1542 break;
1543 }
1544 Node last = e;
1545 if ((e = e.next) == null) {
1546 if ((val = mf.map(k)) != null) {
1547 added = true;
1548 last.next = new Node(h, k, val, null);
1549 if (count >= TREE_THRESHOLD)
1550 replaceWithTreeBin(tab, i, k);
1551 }
1552 break;
1553 }
1554 }
1555 }
1556 } finally {
1557 if (!f.casHash(fh | LOCKED, fh)) {
1558 f.hash = fh;
1559 synchronized (f) { f.notifyAll(); };
1560 }
1561 }
1562 if (count != 0) {
1563 if (!added)
1564 return val;
1565 if (tab.length <= 64)
1566 count = 2;
1567 break;
1568 }
1569 }
1570 }
1571 }
1572 if (val != null) {
1573 counter.add(1L);
1574 if (count > 1)
1575 checkForResize();
1576 }
1577 return val;
1578 }
1579
1580 /** Implementation for compute */
1581 @SuppressWarnings("unchecked")
1582 private final Object internalCompute(K k,
1583 RemappingFunction<? super K, V> mf) {
1584 int h = spread(k.hashCode());
1585 Object val = null;
1586 int delta = 0;
1587 int count = 0;
1588 for (Node[] tab = table;;) {
1589 Node f; int i, fh; Object fk;
1590 if (tab == null)
1591 tab = initTable();
1592 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1593 Node node = new Node(fh = h | LOCKED, k, null, null);
1594 if (casTabAt(tab, i, null, node)) {
1595 try {
1596 count = 1;
1597 if ((val = mf.remap(k, null)) != null) {
1598 node.val = val;
1599 delta = 1;
1600 }
1601 } finally {
1602 if (delta == 0)
1603 setTabAt(tab, i, null);
1604 if (!node.casHash(fh, h)) {
1605 node.hash = h;
1606 synchronized (node) { node.notifyAll(); };
1607 }
1608 }
1609 }
1610 if (count != 0)
1611 break;
1612 }
1613 else if ((fh = f.hash) == MOVED) {
1614 if ((fk = f.key) instanceof TreeBin) {
1615 TreeBin t = (TreeBin)fk;
1616 t.acquire(0);
1617 try {
1618 if (tabAt(tab, i) == f) {
1619 count = 1;
1620 TreeNode p = t.getTreeNode(h, k, t.root);
1621 Object pv = (p == null) ? null : p.val;
1622 if ((val = mf.remap(k, (V)pv)) != null) {
1623 if (p != null)
1624 p.val = val;
1625 else {
1626 count = 2;
1627 delta = 1;
1628 t.putTreeNode(h, k, val);
1629 }
1630 }
1631 else if (p != null) {
1632 delta = -1;
1633 t.deleteTreeNode(p);
1634 }
1635 }
1636 } finally {
1637 t.release(0);
1638 }
1639 if (count != 0)
1640 break;
1641 }
1642 else
1643 tab = (Node[])fk;
1644 }
1645 else if ((fh & LOCKED) != 0) {
1646 checkForResize();
1647 f.tryAwaitLock(tab, i);
1648 }
1649 else if (f.casHash(fh, fh | LOCKED)) {
1650 try {
1651 if (tabAt(tab, i) == f) {
1652 count = 1;
1653 for (Node e = f, pred = null;; ++count) {
1654 Object ek, ev;
1655 if ((e.hash & HASH_BITS) == h &&
1656 (ev = e.val) != null &&
1657 ((ek = e.key) == k || k.equals(ek))) {
1658 val = mf.remap(k, (V)ev);
1659 if (val != null)
1660 e.val = val;
1661 else {
1662 delta = -1;
1663 Node en = e.next;
1664 if (pred != null)
1665 pred.next = en;
1666 else
1667 setTabAt(tab, i, en);
1668 }
1669 break;
1670 }
1671 pred = e;
1672 if ((e = e.next) == null) {
1673 if ((val = mf.remap(k, null)) != null) {
1674 pred.next = new Node(h, k, val, null);
1675 delta = 1;
1676 if (count >= TREE_THRESHOLD)
1677 replaceWithTreeBin(tab, i, k);
1678 }
1679 break;
1680 }
1681 }
1682 }
1683 } finally {
1684 if (!f.casHash(fh | LOCKED, fh)) {
1685 f.hash = fh;
1686 synchronized (f) { f.notifyAll(); };
1687 }
1688 }
1689 if (count != 0) {
1690 if (tab.length <= 64)
1691 count = 2;
1692 break;
1693 }
1694 }
1695 }
1696 if (delta != 0) {
1697 counter.add((long)delta);
1698 if (count > 1)
1699 checkForResize();
1700 }
1701 return val;
1702 }
1703
1704 /** Implementation for putAll */
1705 private final void internalPutAll(Map<?, ?> m) {
1706 tryPresize(m.size());
1707 long delta = 0L; // number of uncommitted additions
1708 boolean npe = false; // to throw exception on exit for nulls
1709 try { // to clean up counts on other exceptions
1710 for (Map.Entry<?, ?> entry : m.entrySet()) {
1711 Object k, v;
1712 if (entry == null || (k = entry.getKey()) == null ||
1713 (v = entry.getValue()) == null) {
1714 npe = true;
1715 break;
1716 }
1717 int h = spread(k.hashCode());
1718 for (Node[] tab = table;;) {
1719 int i; Node f; int fh; Object fk;
1720 if (tab == null)
1721 tab = initTable();
1722 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
1723 if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1724 ++delta;
1725 break;
1726 }
1727 }
1728 else if ((fh = f.hash) == MOVED) {
1729 if ((fk = f.key) instanceof TreeBin) {
1730 TreeBin t = (TreeBin)fk;
1731 boolean validated = false;
1732 t.acquire(0);
1733 try {
1734 if (tabAt(tab, i) == f) {
1735 validated = true;
1736 TreeNode p = t.getTreeNode(h, k, t.root);
1737 if (p != null)
1738 p.val = v;
1739 else {
1740 t.putTreeNode(h, k, v);
1741 ++delta;
1742 }
1743 }
1744 } finally {
1745 t.release(0);
1746 }
1747 if (validated)
1748 break;
1749 }
1750 else
1751 tab = (Node[])fk;
1752 }
1753 else if ((fh & LOCKED) != 0) {
1754 counter.add(delta);
1755 delta = 0L;
1756 checkForResize();
1757 f.tryAwaitLock(tab, i);
1758 }
1759 else if (f.casHash(fh, fh | LOCKED)) {
1760 int count = 0;
1761 try {
1762 if (tabAt(tab, i) == f) {
1763 count = 1;
1764 for (Node e = f;; ++count) {
1765 Object ek, ev;
1766 if ((e.hash & HASH_BITS) == h &&
1767 (ev = e.val) != null &&
1768 ((ek = e.key) == k || k.equals(ek))) {
1769 e.val = v;
1770 break;
1771 }
1772 Node last = e;
1773 if ((e = e.next) == null) {
1774 ++delta;
1775 last.next = new Node(h, k, v, null);
1776 if (count >= TREE_THRESHOLD)
1777 replaceWithTreeBin(tab, i, k);
1778 break;
1779 }
1780 }
1781 }
1782 } finally {
1783 if (!f.casHash(fh | LOCKED, fh)) {
1784 f.hash = fh;
1785 synchronized (f) { f.notifyAll(); };
1786 }
1787 }
1788 if (count != 0) {
1789 if (count > 1) {
1790 counter.add(delta);
1791 delta = 0L;
1792 checkForResize();
1793 }
1794 break;
1795 }
1796 }
1797 }
1798 }
1799 } finally {
1800 if (delta != 0)
1801 counter.add(delta);
1802 }
1803 if (npe)
1804 throw new NullPointerException();
1805 }
1806
1807 /* ---------------- Table Initialization and Resizing -------------- */
1808
1809 /**
1810 * Returns a power of two table size for the given desired capacity.
1811      * See Hacker's Delight, sec 3.2
1812 */
1813 private static final int tableSizeFor(int c) {
1814 int n = c - 1;
1815 n |= n >>> 1;
1816 n |= n >>> 2;
1817 n |= n >>> 4;
1818 n |= n >>> 8;
1819 n |= n >>> 16;
1820 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
1821 }
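/*
 * For example (a brief worked illustration of the bit smearing above):
 *
 *   tableSizeFor(1)  == 1    // n = 0  stays 0            -> 1
 *   tableSizeFor(16) == 16   // n = 15 is already 0b01111 -> 16
 *   tableSizeFor(17) == 32   // n = 16 smears to 0b11111  -> 32
 */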
1822
1823 /**
1824 * Initializes table, using the size recorded in sizeCtl.
1825 */
1826 private final Node[] initTable() {
1827 Node[] tab; int sc;
1828 while ((tab = table) == null) {
1829 if ((sc = sizeCtl) < 0)
1830 Thread.yield(); // lost initialization race; just spin
1831 else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
1832 try {
1833 if ((tab = table) == null) {
1834 int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
1835 tab = table = new Node[n];
1836 sc = n - (n >>> 2);
1837 }
1838 } finally {
1839 sizeCtl = sc;
1840 }
1841 break;
1842 }
1843 }
1844 return tab;
1845 }
1846
1847 /**
1848 * If table is too small and not already resizing, creates next
1849 * table and transfers bins. Rechecks occupancy after a transfer
1850 * to see if another resize is already needed because resizings
1851 * are lagging additions.
1852 */
1853 private final void checkForResize() {
1854 Node[] tab; int n, sc;
1855 while ((tab = table) != null &&
1856 (n = tab.length) < MAXIMUM_CAPACITY &&
1857 (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc &&
1858 UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
1859 try {
1860 if (tab == table) {
1861 table = rebuild(tab);
1862 sc = (n << 1) - (n >>> 1);
1863 }
1864 } finally {
1865 sizeCtl = sc;
1866 }
1867 }
1868 }
1869
1870 /**
1871 * Tries to presize table to accommodate the given number of elements.
1872 *
1873 * @param size number of elements (doesn't need to be perfectly accurate)
1874 */
1875 private final void tryPresize(int size) {
1876 int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
1877 tableSizeFor(size + (size >>> 1) + 1);
1878 int sc;
1879 while ((sc = sizeCtl) >= 0) {
1880 Node[] tab = table; int n;
1881 if (tab == null || (n = tab.length) == 0) {
1882 n = (sc > c) ? sc : c;
1883 if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
1884 try {
1885 if (table == tab) {
1886 table = new Node[n];
1887 sc = n - (n >>> 2);
1888 }
1889 } finally {
1890 sizeCtl = sc;
1891 }
1892 }
1893 }
1894 else if (c <= sc || n >= MAXIMUM_CAPACITY)
1895 break;
1896 else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
1897 try {
1898 if (table == tab) {
1899 table = rebuild(tab);
1900 sc = (n << 1) - (n >>> 1);
1901 }
1902 } finally {
1903 sizeCtl = sc;
1904 }
1905 }
1906 }
1907 }
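/*
 * For example, tryPresize(100) (as putAll would request for a source map of
 * 100 mappings) asks for tableSizeFor(100 + 50 + 1) = 256 bins; the
 * extra-half-plus-one slack keeps the resulting load at or below 2/3,
 * under the 0.75 resize threshold (ignoring the MAXIMUM_CAPACITY clamp).
 */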
1908
1909     /**
1910 * Moves and/or copies the nodes in each bin to new table. See
1911 * above for explanation.
1912 *
1913 * @return the new table
1914 */
1915 private static final Node[] rebuild(Node[] tab) {
1916 int n = tab.length;
1917 Node[] nextTab = new Node[n << 1];
1918 Node fwd = new Node(MOVED, nextTab, null, null);
1919 int[] buffer = null; // holds bins to revisit; null until needed
1920 Node rev = null; // reverse forwarder; null until needed
1921 int nbuffered = 0; // the number of bins in buffer list
1922 int bufferIndex = 0; // buffer index of current buffered bin
1923 int bin = n - 1; // current non-buffered bin or -1 if none
1924
1925 for (int i = bin;;) { // start upwards sweep
1926 int fh; Node f;
1927 if ((f = tabAt(tab, i)) == null) {
1928 if (bin >= 0) { // no lock needed (or available)
1929 if (!casTabAt(tab, i, f, fwd))
1930 continue;
1931 }
1932 else { // transiently use a locked forwarding node
1933 Node g = new Node(MOVED|LOCKED, nextTab, null, null);
1934 if (!casTabAt(tab, i, f, g))
1935 continue;
1936 setTabAt(nextTab, i, null);
1937 setTabAt(nextTab, i + n, null);
1938 setTabAt(tab, i, fwd);
1939 if (!g.casHash(MOVED|LOCKED, MOVED)) {
1940 g.hash = MOVED;
1941 synchronized (g) { g.notifyAll(); }
1942 }
1943 }
1944 }
1945 else if ((fh = f.hash) == MOVED) {
1946 Object fk = f.key;
1947 if (fk instanceof TreeBin) {
1948 TreeBin t = (TreeBin)fk;
1949 boolean validated = false;
1950 t.acquire(0);
1951 try {
1952 if (tabAt(tab, i) == f) {
1953 validated = true;
1954 splitTreeBin(nextTab, i, t);
1955 setTabAt(tab, i, fwd);
1956 }
1957 } finally {
1958 t.release(0);
1959 }
1960 if (!validated)
1961 continue;
1962 }
1963 }
1964 else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) {
1965 boolean validated = false;
1966 try { // split to lo and hi lists; copying as needed
1967 if (tabAt(tab, i) == f) {
1968 validated = true;
1969 splitBin(nextTab, i, f);
1970 setTabAt(tab, i, fwd);
1971 }
1972 } finally {
1973 if (!f.casHash(fh | LOCKED, fh)) {
1974 f.hash = fh;
1975 synchronized (f) { f.notifyAll(); };
1976 }
1977 }
1978 if (!validated)
1979 continue;
1980 }
1981 else {
1982 if (buffer == null) // initialize buffer for revisits
1983 buffer = new int[TRANSFER_BUFFER_SIZE];
1984 if (bin < 0 && bufferIndex > 0) {
1985 int j = buffer[--bufferIndex];
1986 buffer[bufferIndex] = i;
1987 i = j; // swap with another bin
1988 continue;
1989 }
1990 if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) {
1991 f.tryAwaitLock(tab, i);
1992 continue; // no other options -- block
1993 }
1994 if (rev == null) // initialize reverse-forwarder
1995 rev = new Node(MOVED, tab, null, null);
1996 if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0)
1997 continue; // recheck before adding to list
1998 buffer[nbuffered++] = i;
1999 setTabAt(nextTab, i, rev); // install place-holders
2000 setTabAt(nextTab, i + n, rev);
2001 }
2002
2003 if (bin > 0)
2004 i = --bin;
2005 else if (buffer != null && nbuffered > 0) {
2006 bin = -1;
2007 i = buffer[bufferIndex = --nbuffered];
2008 }
2009 else
2010 return nextTab;
2011 }
2012 }
2013
2014 /**
2015 * Split a normal bin with list headed by e into lo and hi parts;
2016      * Splits a normal bin with the list headed by e into lo and hi parts,
2017      * installing them in the given table.
2018 private static void splitBin(Node[] nextTab, int i, Node e) {
2019 int bit = nextTab.length >>> 1; // bit to split on
2020 int runBit = e.hash & bit;
2021 Node lastRun = e, lo = null, hi = null;
2022 for (Node p = e.next; p != null; p = p.next) {
2023 int b = p.hash & bit;
2024 if (b != runBit) {
2025 runBit = b;
2026 lastRun = p;
2027 }
2028 }
2029 if (runBit == 0)
2030 lo = lastRun;
2031 else
2032 hi = lastRun;
2033 for (Node p = e; p != lastRun; p = p.next) {
2034 int ph = p.hash & HASH_BITS;
2035 Object pk = p.key, pv = p.val;
2036 if ((ph & bit) == 0)
2037 lo = new Node(ph, pk, pv, lo);
2038 else
2039 hi = new Node(ph, pk, pv, hi);
2040 }
2041 setTabAt(nextTab, i, lo);
2042 setTabAt(nextTab, i + bit, hi);
2043 }
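/*
 * For example, when a 16-bin table is rebuilt into 32 bins, bit == 16.
 * Every node in old bin i has (hash & 15) == i, so nodes with
 * (hash & 16) == 0 form the lo list reinstalled at index i, and the rest
 * form the hi list installed at index i + 16. Only nodes preceding lastRun
 * are copied; the trailing run, whose bit values all agree, is reused as-is.
 */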
2044
2045 /**
2046      * Splits a tree bin into lo and hi parts and installs them in the given table.
2047 */
2048 private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) {
2049 int bit = nextTab.length >>> 1;
2050 TreeBin lt = new TreeBin();
2051 TreeBin ht = new TreeBin();
2052 int lc = 0, hc = 0;
2053 for (Node e = t.first; e != null; e = e.next) {
2054 int h = e.hash & HASH_BITS;
2055 Object k = e.key, v = e.val;
2056 if ((h & bit) == 0) {
2057 ++lc;
2058 lt.putTreeNode(h, k, v);
2059 }
2060 else {
2061 ++hc;
2062 ht.putTreeNode(h, k, v);
2063 }
2064 }
2065 Node ln, hn; // throw away trees if too small
2066 if (lc <= (TREE_THRESHOLD >>> 1)) {
2067 ln = null;
2068 for (Node p = lt.first; p != null; p = p.next)
2069 ln = new Node(p.hash, p.key, p.val, ln);
2070 }
2071 else
2072 ln = new Node(MOVED, lt, null, null);
2073 setTabAt(nextTab, i, ln);
2074 if (hc <= (TREE_THRESHOLD >>> 1)) {
2075 hn = null;
2076 for (Node p = ht.first; p != null; p = p.next)
2077 hn = new Node(p.hash, p.key, p.val, hn);
2078 }
2079 else
2080 hn = new Node(MOVED, ht, null, null);
2081 setTabAt(nextTab, i + bit, hn);
2082 }
2083
2084 /**
2085 * Implementation for clear. Steps through each bin, removing all
2086 * nodes.
2087 */
2088 private final void internalClear() {
2089 long delta = 0L; // negative number of deletions
2090 int i = 0;
2091 Node[] tab = table;
2092 while (tab != null && i < tab.length) {
2093 int fh; Object fk;
2094 Node f = tabAt(tab, i);
2095 if (f == null)
2096 ++i;
2097 else if ((fh = f.hash) == MOVED) {
2098 if ((fk = f.key) instanceof TreeBin) {
2099 TreeBin t = (TreeBin)fk;
2100 t.acquire(0);
2101 try {
2102 if (tabAt(tab, i) == f) {
2103 for (Node p = t.first; p != null; p = p.next) {
2104 p.val = null;
2105 --delta;
2106 }
2107 t.first = null;
2108 t.root = null;
2109 ++i;
2110 }
2111 } finally {
2112 t.release(0);
2113 }
2114 }
2115 else
2116 tab = (Node[])fk;
2117 }
2118 else if ((fh & LOCKED) != 0) {
2119 counter.add(delta); // opportunistically update count
2120 delta = 0L;
2121 f.tryAwaitLock(tab, i);
2122 }
2123 else if (f.casHash(fh, fh | LOCKED)) {
2124 try {
2125 if (tabAt(tab, i) == f) {
2126 for (Node e = f; e != null; e = e.next) {
2127 e.val = null;
2128 --delta;
2129 }
2130 setTabAt(tab, i, null);
2131 ++i;
2132 }
2133 } finally {
2134 if (!f.casHash(fh | LOCKED, fh)) {
2135 f.hash = fh;
2136 synchronized (f) { f.notifyAll(); };
2137 }
2138 }
2139 }
2140 }
2141 if (delta != 0)
2142 counter.add(delta);
2143 }
2144
2145     /* ---------------- Table Traversal -------------- */
2146
2147 /**
2148 * Encapsulates traversal for methods such as containsValue; also
2149 * serves as a base class for other iterators.
2150 *
2151 * At each step, the iterator snapshots the key ("nextKey") and
2152 * value ("nextVal") of a valid node (i.e., one that, at point of
2153 * snapshot, has a non-null user value). Because val fields can
2154 * change (including to null, indicating deletion), field nextVal
2155 * might not be accurate at point of use, but still maintains the
2156 * weak consistency property of holding a value that was once
2157 * valid.
2158 *
2159 * Internal traversals directly access these fields, as in:
2160 * {@code while (it.advance() != null) { process(it.nextKey); }}
2161 *
2162 * Exported iterators must track whether the iterator has advanced
2163 * (in hasNext vs next) (by setting/checking/nulling field
2164 * nextVal), and then extract key, value, or key-value pairs as
2165 * return values of next().
2166 *
2167 * The iterator visits once each still-valid node that was
2168 * reachable upon iterator construction. It might miss some that
2169 * were added to a bin after the bin was visited, which is OK wrt
2170 * consistency guarantees. Maintaining this property in the face
2171 * of possible ongoing resizes requires a fair amount of
2172 * bookkeeping state that is difficult to optimize away amidst
2173 * volatile accesses. Even so, traversal maintains reasonable
2174 * throughput.
2175 *
2176 * Normally, iteration proceeds bin-by-bin traversing lists.
2177 * However, if the table has been resized, then all future steps
2178 * must traverse both the bin at the current index as well as at
2179 * (index + baseSize); and so on for further resizings. To
2180 * paranoically cope with potential sharing by users of iterators
2181      * across threads, iteration terminates if a bounds check fails
2182 * for a table read.
2183 */
2184 static class InternalIterator<K,V> {
2185 final ConcurrentHashMapV8<K, V> map;
2186 Node next; // the next entry to use
2187 Node last; // the last entry used
2188 Object nextKey; // cached key field of next
2189 Object nextVal; // cached val field of next
2190 Node[] tab; // current table; updated if resized
2191 int index; // index of bin to use next
2192 int baseIndex; // current index of initial table
2193 int baseLimit; // index bound for initial table
2194 final int baseSize; // initial table size
2195
2196 /** Creates iterator for all entries in the table. */
2197 InternalIterator(ConcurrentHashMapV8<K, V> map) {
2198 this.tab = (this.map = map).table;
2199 baseLimit = baseSize = (tab == null) ? 0 : tab.length;
2200 }
2201
2202 /** Creates iterator for clone() and split() methods */
2203 InternalIterator(InternalIterator<K,V> it, boolean split) {
2204 this.map = it.map;
2205 this.tab = it.tab;
2206 this.baseSize = it.baseSize;
2207 int lo = it.baseIndex;
2208 int hi = this.baseLimit = it.baseLimit;
2209 this.index = this.baseIndex =
2210 (split) ? (it.baseLimit = (lo + hi + 1) >>> 1) : lo;
2211 }
2212
2213 /**
2214          * Advances next; returns nextVal, or null if terminated.
2215 * See above for explanation.
2216 */
2217 final Object advance() {
2218 Node e = last = next;
2219 Object ev = null;
2220 outer: do {
2221 if (e != null) // advance past used/skipped node
2222 e = e.next;
2223 while (e == null) { // get to next non-null bin
2224 Node[] t; int b, i, n; Object ek; // checks must use locals
2225 if ((b = baseIndex) >= baseLimit || (i = index) < 0 ||
2226 (t = tab) == null || i >= (n = t.length))
2227 break outer;
2228 else if ((e = tabAt(t, i)) != null && e.hash == MOVED) {
2229 if ((ek = e.key) instanceof TreeBin)
2230 e = ((TreeBin)ek).first;
2231 else {
2232 tab = (Node[])ek;
2233 continue; // restarts due to null val
2234 }
2235 } // visit upper slots if present
2236 index = (i += baseSize) < n ? i : (baseIndex = b + 1);
2237 }
2238 nextKey = e.key;
2239 } while ((ev = e.val) == null); // skip deleted or special nodes
2240 next = e;
2241 return nextVal = ev;
2242 }
2243
2244 public final void remove() {
2245 if (nextVal == null)
2246 advance();
2247 Node e = last;
2248 if (e == null)
2249 throw new IllegalStateException();
2250 last = null;
2251 map.remove(e.key);
2252 }
2253
2254 public final boolean hasNext() {
2255 return nextVal != null || advance() != null;
2256 }
2257
2258 public final boolean hasMoreElements() { return hasNext(); }
2259 }
2260
2261 /* ---------------- Public operations -------------- */
2262
2263 /**
2264      * Creates a new, empty map with the default initial table size (16).
2265 */
2266 public ConcurrentHashMapV8() {
2267 this.counter = new LongAdder();
2268 }
2269
2270 /**
2271 * Creates a new, empty map with an initial table size
2272 * accommodating the specified number of elements without the need
2273 * to dynamically resize.
2274 *
2275 * @param initialCapacity The implementation performs internal
2276 * sizing to accommodate this many elements.
2277 * @throws IllegalArgumentException if the initial capacity of
2278 * elements is negative
2279 */
2280 public ConcurrentHashMapV8(int initialCapacity) {
2281 if (initialCapacity < 0)
2282 throw new IllegalArgumentException();
2283 int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
2284 MAXIMUM_CAPACITY :
2285 tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
2286 this.counter = new LongAdder();
2287 this.sizeCtl = cap;
2288 }
2289
2290 /**
2291 * Creates a new map with the same mappings as the given map.
2292 *
2293 * @param m the map
2294 */
2295 public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
2296 this.counter = new LongAdder();
2297 this.sizeCtl = DEFAULT_CAPACITY;
2298 internalPutAll(m);
2299 }
2300
2301 /**
2302 * Creates a new, empty map with an initial table size based on
2303 * the given number of elements ({@code initialCapacity}) and
2304 * initial table density ({@code loadFactor}).
2305 *
2306 * @param initialCapacity the initial capacity. The implementation
2307 * performs internal sizing to accommodate this many elements,
2308 * given the specified load factor.
2309 * @param loadFactor the load factor (table density) for
2310 * establishing the initial table size
2311 * @throws IllegalArgumentException if the initial capacity of
2312 * elements is negative or the load factor is nonpositive
2313 *
2314 * @since 1.6
2315 */
2316 public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
2317 this(initialCapacity, loadFactor, 1);
2318 }
2319
2320 /**
2321 * Creates a new, empty map with an initial table size based on
2322 * the given number of elements ({@code initialCapacity}), table
2323 * density ({@code loadFactor}), and number of concurrently
2324 * updating threads ({@code concurrencyLevel}).
2325 *
2326 * @param initialCapacity the initial capacity. The implementation
2327 * performs internal sizing to accommodate this many elements,
2328 * given the specified load factor.
2329 * @param loadFactor the load factor (table density) for
2330 * establishing the initial table size
2331 * @param concurrencyLevel the estimated number of concurrently
2332 * updating threads. The implementation may use this value as
2333 * a sizing hint.
2334 * @throws IllegalArgumentException if the initial capacity is
2335 * negative or the load factor or concurrencyLevel are
2336 * nonpositive
2337 */
2338 public ConcurrentHashMapV8(int initialCapacity,
2339 float loadFactor, int concurrencyLevel) {
2340 if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
2341 throw new IllegalArgumentException();
2342 if (initialCapacity < concurrencyLevel) // Use at least as many bins
2343 initialCapacity = concurrencyLevel; // as estimated threads
2344 long size = (long)(1.0 + (long)initialCapacity / loadFactor);
2345 int cap = ((size >= (long)MAXIMUM_CAPACITY) ?
2346 MAXIMUM_CAPACITY: tableSizeFor((int)size));
2347 this.counter = new LongAdder();
2348 this.sizeCtl = cap;
2349 }
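/*
 * For example (names illustrative), new ConcurrentHashMapV8<K,V>(16, 0.75f, 4)
 * computes size = (long)(1.0 + 16 / 0.75f) = 22 and hence
 * cap = tableSizeFor(22) = 32, which becomes the initial sizeCtl.
 */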
2350
2351 /**
2352 * {@inheritDoc}
2353 */
2354 public boolean isEmpty() {
2355 return counter.sum() <= 0L; // ignore transient negative values
2356 }
2357
2358 /**
2359 * {@inheritDoc}
2360 */
2361 public int size() {
2362 long n = counter.sum();
2363 return ((n < 0L) ? 0 :
2364 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
2365 (int)n);
2366 }
2367
2368 final long longSize() { // accurate version of size needed for views
2369 long n = counter.sum();
2370 return (n < 0L) ? 0L : n;
2371 }
2372
2373 /**
2374 * Returns the value to which the specified key is mapped,
2375 * or {@code null} if this map contains no mapping for the key.
2376 *
2377 * <p>More formally, if this map contains a mapping from a key
2378 * {@code k} to a value {@code v} such that {@code key.equals(k)},
2379 * then this method returns {@code v}; otherwise it returns
2380 * {@code null}. (There can be at most one such mapping.)
2381 *
2382 * @throws NullPointerException if the specified key is null
2383 */
2384 @SuppressWarnings("unchecked")
2385 public V get(Object key) {
2386 if (key == null)
2387 throw new NullPointerException();
2388 return (V)internalGet(key);
2389 }
2390
2391 /**
2392 * Tests if the specified object is a key in this table.
2393 *
2394 * @param key possible key
2395 * @return {@code true} if and only if the specified object
2396 * is a key in this table, as determined by the
2397 * {@code equals} method; {@code false} otherwise
2398 * @throws NullPointerException if the specified key is null
2399 */
2400 public boolean containsKey(Object key) {
2401 if (key == null)
2402 throw new NullPointerException();
2403 return internalGet(key) != null;
2404 }
2405
2406 /**
2407 * Returns {@code true} if this map maps one or more keys to the
2408 * specified value. Note: This method may require a full traversal
2409 * of the map, and is much slower than method {@code containsKey}.
2410 *
2411 * @param value value whose presence in this map is to be tested
2412 * @return {@code true} if this map maps one or more keys to the
2413 * specified value
2414 * @throws NullPointerException if the specified value is null
2415 */
2416 public boolean containsValue(Object value) {
2417 if (value == null)
2418 throw new NullPointerException();
2419 Object v;
2420 InternalIterator<K,V> it = new InternalIterator<K,V>(this);
2421 while ((v = it.advance()) != null) {
2422 if (v == value || value.equals(v))
2423 return true;
2424 }
2425 return false;
2426 }
2427
2428 /**
2429 * Legacy method testing if some key maps into the specified value
2430 * in this table. This method is identical in functionality to
2431 * {@link #containsValue}, and exists solely to ensure
2432 * full compatibility with class {@link java.util.Hashtable},
2433 * which supported this method prior to introduction of the
2434 * Java Collections framework.
2435 *
2436 * @param value a value to search for
2437 * @return {@code true} if and only if some key maps to the
2438 * {@code value} argument in this table as
2439 * determined by the {@code equals} method;
2440 * {@code false} otherwise
2441 * @throws NullPointerException if the specified value is null
2442 */
2443 public boolean contains(Object value) {
2444 return containsValue(value);
2445 }
2446
2447 /**
2448 * Maps the specified key to the specified value in this table.
2449 * Neither the key nor the value can be null.
2450 *
2451 * <p> The value can be retrieved by calling the {@code get} method
2452 * with a key that is equal to the original key.
2453 *
2454 * @param key key with which the specified value is to be associated
2455 * @param value value to be associated with the specified key
2456 * @return the previous value associated with {@code key}, or
2457 * {@code null} if there was no mapping for {@code key}
2458 * @throws NullPointerException if the specified key or value is null
2459 */
2460 @SuppressWarnings("unchecked")
2461 public V put(K key, V value) {
2462 if (key == null || value == null)
2463 throw new NullPointerException();
2464 return (V)internalPut(key, value);
2465 }
2466
2467 /**
2468 * {@inheritDoc}
2469 *
2470 * @return the previous value associated with the specified key,
2471 * or {@code null} if there was no mapping for the key
2472 * @throws NullPointerException if the specified key or value is null
2473 */
2474 @SuppressWarnings("unchecked")
2475 public V putIfAbsent(K key, V value) {
2476 if (key == null || value == null)
2477 throw new NullPointerException();
2478 return (V)internalPutIfAbsent(key, value);
2479 }
2480
2481 /**
2482 * Copies all of the mappings from the specified map to this one.
2483 * These mappings replace any mappings that this map had for any of the
2484 * keys currently in the specified map.
2485 *
2486 * @param m mappings to be stored in this map
2487 */
2488 public void putAll(Map<? extends K, ? extends V> m) {
2489 internalPutAll(m);
2490 }
2491
2492 /**
2493 * If the specified key is not already associated with a value,
2494 * computes its value using the given mappingFunction and enters
2495 * it into the map unless null. This is equivalent to
2496 * <pre> {@code
2497 * if (map.containsKey(key))
2498 * return map.get(key);
2499 * value = mappingFunction.map(key);
2500 * if (value != null)
2501 * map.put(key, value);
2502 * return value;}</pre>
2503 *
2504 * except that the action is performed atomically. If the
2505 * function returns {@code null} no mapping is recorded. If the
2506 * function itself throws an (unchecked) exception, the exception
2507 * is rethrown to its caller, and no mapping is recorded. Some
2508 * attempted update operations on this map by other threads may be
2509 * blocked while computation is in progress, so the computation
2510 * should be short and simple, and must not attempt to update any
2511 * other mappings of this Map. The most appropriate usage is to
2512 * construct a new object serving as an initial mapped value, or
2513 * memoized result, as in:
2514 *
2515 * <pre> {@code
2516 * map.computeIfAbsent(key, new MappingFunction<K, V>() {
2517 * public V map(K k) { return new Value(f(k)); }});}</pre>
2518 *
2519 * @param key key with which the specified value is to be associated
2520 * @param mappingFunction the function to compute a value
2521 * @return the current (existing or computed) value associated with
2522 * the specified key, or null if the computed value is null.
2523 * @throws NullPointerException if the specified key or mappingFunction
2524 * is null
2525 * @throws IllegalStateException if the computation detectably
2526 * attempts a recursive update to this map that would
2527 * otherwise never complete
2528 * @throws RuntimeException or Error if the mappingFunction does so,
2529 * in which case the mapping is left unestablished
2530 */
2531 @SuppressWarnings("unchecked")
2532 public V computeIfAbsent(K key, MappingFunction<? super K, ? extends V> mappingFunction) {
2533 if (key == null || mappingFunction == null)
2534 throw new NullPointerException();
2535 return (V)internalComputeIfAbsent(key, mappingFunction);
2536 }
2537
2538 /**
2539 * Computes a new mapping value given a key and
2540 * its current mapped value (or {@code null} if there is no current
2541 * mapping). This is equivalent to
2542 * <pre> {@code
2543 * value = remappingFunction.remap(key, map.get(key));
2544 * if (value != null)
2545 * map.put(key, value);
2546 * else
2547 * map.remove(key);
2548 * }</pre>
2549 *
2550 * except that the action is performed atomically. If the
2551 * function returns {@code null}, the mapping is removed. If the
2552 * function itself throws an (unchecked) exception, the exception
2553 * is rethrown to its caller, and the current mapping is left
2554 * unchanged. Some attempted update operations on this map by
2555 * other threads may be blocked while computation is in progress,
2556 * so the computation should be short and simple, and must not
2557 * attempt to update any other mappings of this Map. For example,
2558 * to either create or append new messages to a value mapping:
2559 *
2560 * <pre> {@code
2561 * Map<Key, String> map = ...;
2562 * final String msg = ...;
2563 * map.compute(key, new RemappingFunction<Key, String>() {
2564 * public String remap(Key k, String v) {
2565      *    return (v == null) ? msg : v + msg; }});}</pre>
2566 *
2567 * @param key key with which the specified value is to be associated
2568 * @param remappingFunction the function to compute a value
2569 * @return the new value associated with
2570 * the specified key, or null if none.
2571 * @throws NullPointerException if the specified key or remappingFunction
2572 * is null
2573 * @throws IllegalStateException if the computation detectably
2574 * attempts a recursive update to this map that would
2575 * otherwise never complete
2576 * @throws RuntimeException or Error if the remappingFunction does so,
2577 * in which case the mapping is unchanged
2578 */
2579 @SuppressWarnings("unchecked")
2580 public V compute(K key, RemappingFunction<? super K, V> remappingFunction) {
2581 if (key == null || remappingFunction == null)
2582 throw new NullPointerException();
2583 return (V)internalCompute(key, remappingFunction);
2584 }
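/*
 * Because a null return removes the mapping, compute can also perform an
 * atomic check-then-remove. A minimal sketch (refCounts and name are
 * illustrative; the Integer values are hypothetical reference counts):
 *
 *   ConcurrentHashMapV8<String, Integer> refCounts = ...;
 *   refCounts.compute(name, new RemappingFunction<String, Integer>() {
 *       public Integer remap(String k, Integer c) {
 *           if (c == null || c.intValue() <= 1)
 *               return null;                       // removes the mapping
 *           return Integer.valueOf(c.intValue() - 1);
 *       }});
 */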
2585
2586 /**
2587 * Removes the key (and its corresponding value) from this map.
2588 * This method does nothing if the key is not in the map.
2589 *
2590 * @param key the key that needs to be removed
2591 * @return the previous value associated with {@code key}, or
2592 * {@code null} if there was no mapping for {@code key}
2593 * @throws NullPointerException if the specified key is null
2594 */
2595 @SuppressWarnings("unchecked")
2596 public V remove(Object key) {
2597 if (key == null)
2598 throw new NullPointerException();
2599 return (V)internalReplace(key, null, null);
2600 }
2601
2602 /**
2603 * {@inheritDoc}
2604 *
2605 * @throws NullPointerException if the specified key is null
2606 */
2607 public boolean remove(Object key, Object value) {
2608 if (key == null)
2609 throw new NullPointerException();
2610 if (value == null)
2611 return false;
2612 return internalReplace(key, null, value) != null;
2613 }
2614
2615 /**
2616 * {@inheritDoc}
2617 *
2618 * @throws NullPointerException if any of the arguments are null
2619 */
2620 public boolean replace(K key, V oldValue, V newValue) {
2621 if (key == null || oldValue == null || newValue == null)
2622 throw new NullPointerException();
2623 return internalReplace(key, newValue, oldValue) != null;
2624 }
2625
2626 /**
2627 * {@inheritDoc}
2628 *
2629 * @return the previous value associated with the specified key,
2630 * or {@code null} if there was no mapping for the key
2631 * @throws NullPointerException if the specified key or value is null
2632 */
2633 @SuppressWarnings("unchecked")
2634 public V replace(K key, V value) {
2635 if (key == null || value == null)
2636 throw new NullPointerException();
2637 return (V)internalReplace(key, value, null);
2638 }
2639
2640 /**
2641 * Removes all of the mappings from this map.
2642 */
2643 public void clear() {
2644 internalClear();
2645 }
2646
2647 /**
2648 * Returns a {@link Set} view of the keys contained in this map.
2649 * The set is backed by the map, so changes to the map are
2650 * reflected in the set, and vice-versa. The set supports element
2651 * removal, which removes the corresponding mapping from this map,
2652 * via the {@code Iterator.remove}, {@code Set.remove},
2653 * {@code removeAll}, {@code retainAll}, and {@code clear}
2654 * operations. It does not support the {@code add} or
2655 * {@code addAll} operations.
2656 *
2657 * <p>The view's {@code iterator} is a "weakly consistent" iterator
2658 * that will never throw {@link ConcurrentModificationException},
2659 * and guarantees to traverse elements as they existed upon
2660 * construction of the iterator, and may (but is not guaranteed to)
2661 * reflect any modifications subsequent to construction.
2662 */
2663 public Set<K> keySet() {
2664 KeySet<K,V> ks = keySet;
2665 return (ks != null) ? ks : (keySet = new KeySet<K,V>(this));
2666 }
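/*
 * A minimal usage sketch (names illustrative): element removal through the
 * view's weakly consistent iterator writes through to the map.
 *
 *   ConcurrentHashMapV8<String, Long> m = ...;
 *   for (Iterator<String> it = m.keySet().iterator(); it.hasNext();) {
 *       if (it.next().startsWith("tmp-"))
 *           it.remove();              // also removes the mapping from m
 *   }
 */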
2667
2668 /**
2669 * Returns a {@link Collection} view of the values contained in this map.
2670 * The collection is backed by the map, so changes to the map are
2671 * reflected in the collection, and vice-versa. The collection
2672 * supports element removal, which removes the corresponding
2673 * mapping from this map, via the {@code Iterator.remove},
2674 * {@code Collection.remove}, {@code removeAll},
2675 * {@code retainAll}, and {@code clear} operations. It does not
2676 * support the {@code add} or {@code addAll} operations.
2677 *
2678 * <p>The view's {@code iterator} is a "weakly consistent" iterator
2679 * that will never throw {@link ConcurrentModificationException},
2680 * and guarantees to traverse elements as they existed upon
2681 * construction of the iterator, and may (but is not guaranteed to)
2682 * reflect any modifications subsequent to construction.
2683 */
2684 public Collection<V> values() {
2685 Values<K,V> vs = values;
2686 return (vs != null) ? vs : (values = new Values<K,V>(this));
2687 }
2688
2689 /**
2690 * Returns a {@link Set} view of the mappings contained in this map.
2691 * The set is backed by the map, so changes to the map are
2692 * reflected in the set, and vice-versa. The set supports element
2693 * removal, which removes the corresponding mapping from the map,
2694 * via the {@code Iterator.remove}, {@code Set.remove},
2695 * {@code removeAll}, {@code retainAll}, and {@code clear}
2696 * operations. It does not support the {@code add} or
2697 * {@code addAll} operations.
2698 *
2699 * <p>The view's {@code iterator} is a "weakly consistent" iterator
2700 * that will never throw {@link ConcurrentModificationException},
2701 * and guarantees to traverse elements as they existed upon
2702 * construction of the iterator, and may (but is not guaranteed to)
2703 * reflect any modifications subsequent to construction.
2704 */
2705 public Set<Map.Entry<K,V>> entrySet() {
2706 EntrySet<K,V> es = entrySet;
2707 return (es != null) ? es : (entrySet = new EntrySet<K,V>(this));
2708 }
2709
2710 /**
2711 * Returns an enumeration of the keys in this table.
2712 *
2713 * @return an enumeration of the keys in this table
2714 * @see #keySet()
2715 */
2716 public Enumeration<K> keys() {
2717 return new KeyIterator<K,V>(this);
2718 }
2719
2720 /**
2721 * Returns an enumeration of the values in this table.
2722 *
2723 * @return an enumeration of the values in this table
2724 * @see #values()
2725 */
2726 public Enumeration<V> elements() {
2727 return new ValueIterator<K,V>(this);
2728 }
2729
2730 /**
2731      * Returns a partitionable iterator of the keys in this map.
2732      *
2733      * @return a partitionable iterator of the keys in this map
2734 */
2735 public Spliterator<K> keySpliterator() {
2736 return new KeyIterator<K,V>(this);
2737 }
2738
2739 /**
2740      * Returns a partitionable iterator of the values in this map.
2741      *
2742      * @return a partitionable iterator of the values in this map
2743 */
2744 public Spliterator<V> valueSpliterator() {
2745 return new ValueIterator<K,V>(this);
2746 }
2747
2748 /**
2749      * Returns a partitionable iterator of the entries in this map.
2750      *
2751      * @return a partitionable iterator of the entries in this map
2752 */
2753 public Spliterator<Map.Entry<K,V>> entrySpliterator() {
2754 return new EntryIterator<K,V>(this);
2755 }
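/*
 * A sketch of two-way partitioned traversal, assuming (as the iterator
 * classes below suggest) that Spliterator extends Iterator and that split()
 * is invoked before any traversal; map and process are illustrative:
 *
 *   Spliterator<K> s1 = map.keySpliterator();
 *   Spliterator<K> s2 = s1.split();  // s2 covers the upper half of the bins
 *   // s1 retains the lower half; traverse each in its own thread
 *   while (s1.hasNext()) process(s1.next());
 */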
2756
2757 /**
2758 * Returns the hash code value for this {@link Map}, i.e.,
2759 * the sum of, for each key-value pair in the map,
2760 * {@code key.hashCode() ^ value.hashCode()}.
2761 *
2762 * @return the hash code value for this map
2763 */
2764 public int hashCode() {
2765 int h = 0;
2766 InternalIterator<K,V> it = new InternalIterator<K,V>(this);
2767 Object v;
2768 while ((v = it.advance()) != null) {
2769 h += it.nextKey.hashCode() ^ v.hashCode();
2770 }
2771 return h;
2772 }
2773
2774 /**
2775 * Returns a string representation of this map. The string
2776 * representation consists of a list of key-value mappings (in no
2777 * particular order) enclosed in braces ("{@code {}}"). Adjacent
2778 * mappings are separated by the characters {@code ", "} (comma
2779 * and space). Each key-value mapping is rendered as the key
2780 * followed by an equals sign ("{@code =}") followed by the
2781 * associated value.
2782 *
2783 * @return a string representation of this map
2784 */
2785 public String toString() {
2786 InternalIterator<K,V> it = new InternalIterator<K,V>(this);
2787 StringBuilder sb = new StringBuilder();
2788 sb.append('{');
2789 Object v;
2790 if ((v = it.advance()) != null) {
2791 for (;;) {
2792 Object k = it.nextKey;
2793 sb.append(k == this ? "(this Map)" : k);
2794 sb.append('=');
2795 sb.append(v == this ? "(this Map)" : v);
2796 if ((v = it.advance()) == null)
2797 break;
2798 sb.append(',').append(' ');
2799 }
2800 }
2801 return sb.append('}').toString();
2802 }
2803
2804 /**
2805 * Compares the specified object with this map for equality.
2806 * Returns {@code true} if the given object is a map with the same
2807 * mappings as this map. This operation may return misleading
2808 * results if either map is concurrently modified during execution
2809 * of this method.
2810 *
2811 * @param o object to be compared for equality with this map
2812 * @return {@code true} if the specified object is equal to this map
2813 */
2814 public boolean equals(Object o) {
2815 if (o != this) {
2816 if (!(o instanceof Map))
2817 return false;
2818 Map<?,?> m = (Map<?,?>) o;
2819 InternalIterator<K,V> it = new InternalIterator<K,V>(this);
2820 Object val;
2821 while ((val = it.advance()) != null) {
2822 Object v = m.get(it.nextKey);
2823 if (v == null || (v != val && !v.equals(val)))
2824 return false;
2825 }
2826 for (Map.Entry<?,?> e : m.entrySet()) {
2827 Object mk, mv, v;
2828 if ((mk = e.getKey()) == null ||
2829 (mv = e.getValue()) == null ||
2830 (v = internalGet(mk)) == null ||
2831 (mv != v && !mv.equals(v)))
2832 return false;
2833 }
2834 }
2835 return true;
2836 }
2837
2838     /* ---------------- Iterators -------------- */
2839
2840 static final class KeyIterator<K,V> extends InternalIterator<K,V>
2841 implements Spliterator<K>, Enumeration<K> {
2842 KeyIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
2843 KeyIterator(InternalIterator<K,V> it, boolean split) {
2844 super(it, split);
2845 }
2846 public KeyIterator<K,V> split() {
2847 if (last != null || (next != null && nextVal == null))
2848 throw new IllegalStateException();
2849 return new KeyIterator<K,V>(this, true);
2850 }
2851 public KeyIterator<K,V> clone() {
2852 if (last != null || (next != null && nextVal == null))
2853 throw new IllegalStateException();
2854 return new KeyIterator<K,V>(this, false);
2855 }
2856
2857 @SuppressWarnings("unchecked")
2858 public final K next() {
2859 if (nextVal == null && advance() == null)
2860 throw new NoSuchElementException();
2861 Object k = nextKey;
2862 nextVal = null;
2863 return (K) k;
2864 }
2865
2866 public final K nextElement() { return next(); }
2867 }
2868
2869 static final class ValueIterator<K,V> extends InternalIterator<K,V>
2870 implements Spliterator<V>, Enumeration<V> {
2871 ValueIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
2872 ValueIterator(InternalIterator<K,V> it, boolean split) {
2873 super(it, split);
2874 }
2875 public ValueIterator<K,V> split() {
2876 if (last != null || (next != null && nextVal == null))
2877 throw new IllegalStateException();
2878 return new ValueIterator<K,V>(this, true);
2879 }
2880
2881 public ValueIterator<K,V> clone() {
2882 if (last != null || (next != null && nextVal == null))
2883 throw new IllegalStateException();
2884 return new ValueIterator<K,V>(this, false);
2885 }
2886
2887 @SuppressWarnings("unchecked")
2888 public final V next() {
2889 Object v;
2890 if ((v = nextVal) == null && (v = advance()) == null)
2891 throw new NoSuchElementException();
2892 nextVal = null;
2893 return (V) v;
2894 }
2895
2896 public final V nextElement() { return next(); }
2897 }
2898
2899 static final class EntryIterator<K,V> extends InternalIterator<K,V>
2900 implements Spliterator<Map.Entry<K,V>> {
2901 EntryIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
2902 EntryIterator(InternalIterator<K,V> it, boolean split) {
2903 super(it, split);
2904 }
2905 public EntryIterator<K,V> split() {
2906 if (last != null || (next != null && nextVal == null))
2907 throw new IllegalStateException();
2908 return new EntryIterator<K,V>(this, true);
2909 }
2910 public EntryIterator<K,V> clone() {
2911 if (last != null || (next != null && nextVal == null))
2912 throw new IllegalStateException();
2913 return new EntryIterator<K,V>(this, false);
2914 }
2915
2916 @SuppressWarnings("unchecked")
2917 public final Map.Entry<K,V> next() {
2918 Object v;
2919 if ((v = nextVal) == null && (v = advance()) == null)
2920 throw new NoSuchElementException();
2921 Object k = nextKey;
2922 nextVal = null;
2923 return new MapEntry<K,V>((K)k, (V)v, map);
2924 }
2925 }
2926
2927 /**
2928 * Exported Entry for iterators
2929 */
2930 static final class MapEntry<K,V> implements Map.Entry<K, V> {
2931 final K key; // non-null
2932 V val; // non-null
2933 final ConcurrentHashMapV8<K, V> map;
2934 MapEntry(K key, V val, ConcurrentHashMapV8<K, V> map) {
2935 this.key = key;
2936 this.val = val;
2937 this.map = map;
2938 }
2939 public final K getKey() { return key; }
2940 public final V getValue() { return val; }
2941 public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
2942 public final String toString(){ return key + "=" + val; }
2943
2944 public final boolean equals(Object o) {
2945 Object k, v; Map.Entry<?,?> e;
2946 return ((o instanceof Map.Entry) &&
2947 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
2948 (v = e.getValue()) != null &&
2949 (k == key || k.equals(key)) &&
2950 (v == val || v.equals(val)));
2951 }
2952
2953 /**
2954 * Sets our entry's value and writes through to the map. The
2955          * value to return is somewhat arbitrary here. Since we do
2956 * not necessarily track asynchronous changes, the most recent
2957 * "previous" value could be different from what we return (or
2958          * could even have been removed, in which case the put will
2959          * re-establish it). We do not and cannot guarantee more.
2960 */
2961 public final V setValue(V value) {
2962 if (value == null) throw new NullPointerException();
2963 V v = val;
2964 val = value;
2965 map.put(key, value);
2966 return v;
2967 }
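/*
 * Illustratively, for an entry obtained from an entrySet() iterator:
 *
 *   Map.Entry<K,V> e = map.entrySet().iterator().next();
 *   e.setValue(v);     // also performs map.put(e.getKey(), v)
 */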
2968 }
2969
2970     /* ---------------- Views -------------- */
2971
2972 /**
2973 * Base class for views.
2974 */
2975 static abstract class MapView<K, V> {
2976 final ConcurrentHashMapV8<K, V> map;
2977 MapView(ConcurrentHashMapV8<K, V> map) { this.map = map; }
2978 public final int size() { return map.size(); }
2979 public final boolean isEmpty() { return map.isEmpty(); }
2980 public final void clear() { map.clear(); }
2981
2982 // implementations below rely on concrete classes supplying these
2983 abstract public Iterator<?> iterator();
2984 abstract public boolean contains(Object o);
2985 abstract public boolean remove(Object o);
2986
2987 private static final String oomeMsg = "Required array size too large";
2988
2989 public final Object[] toArray() {
2990 long sz = map.longSize();
2991 if (sz > (long)(MAX_ARRAY_SIZE))
2992 throw new OutOfMemoryError(oomeMsg);
2993 int n = (int)sz;
2994 Object[] r = new Object[n];
2995 int i = 0;
2996 Iterator<?> it = iterator();
2997 while (it.hasNext()) {
2998 if (i == n) {
2999 if (n >= MAX_ARRAY_SIZE)
3000 throw new OutOfMemoryError(oomeMsg);
3001 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3002 n = MAX_ARRAY_SIZE;
3003 else
3004 n += (n >>> 1) + 1;
3005 r = Arrays.copyOf(r, n);
3006 }
3007 r[i++] = it.next();
3008 }
3009 return (i == n) ? r : Arrays.copyOf(r, i);
3010 }
3011
3012 @SuppressWarnings("unchecked")
3013 public final <T> T[] toArray(T[] a) {
3014 long sz = map.longSize();
3015 if (sz > (long)(MAX_ARRAY_SIZE))
3016 throw new OutOfMemoryError(oomeMsg);
3017 int m = (int)sz;
3018 T[] r = (a.length >= m) ? a :
3019 (T[])java.lang.reflect.Array
3020 .newInstance(a.getClass().getComponentType(), m);
3021 int n = r.length;
3022 int i = 0;
3023 Iterator<?> it = iterator();
3024 while (it.hasNext()) {
3025 if (i == n) {
3026 if (n >= MAX_ARRAY_SIZE)
3027 throw new OutOfMemoryError(oomeMsg);
3028 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3029 n = MAX_ARRAY_SIZE;
3030 else
3031 n += (n >>> 1) + 1;
3032 r = Arrays.copyOf(r, n);
3033 }
3034 r[i++] = (T)it.next();
3035 }
3036 if (a == r && i < n) {
3037 r[i] = null; // null-terminate
3038 return r;
3039 }
3040 return (i == n) ? r : Arrays.copyOf(r, i);
3041 }
3042
3043 public final int hashCode() {
3044 int h = 0;
3045 for (Iterator<?> it = iterator(); it.hasNext();)
3046 h += it.next().hashCode();
3047 return h;
3048 }
3049
3050 public final String toString() {
3051 StringBuilder sb = new StringBuilder();
3052 sb.append('[');
3053 Iterator<?> it = iterator();
3054 if (it.hasNext()) {
3055 for (;;) {
3056 Object e = it.next();
3057 sb.append(e == this ? "(this Collection)" : e);
3058 if (!it.hasNext())
3059 break;
3060 sb.append(',').append(' ');
3061 }
3062 }
3063 return sb.append(']').toString();
3064 }
3065
3066 public final boolean containsAll(Collection<?> c) {
3067 if (c != this) {
3068 for (Iterator<?> it = c.iterator(); it.hasNext();) {
3069 Object e = it.next();
3070 if (e == null || !contains(e))
3071 return false;
3072 }
3073 }
3074 return true;
3075 }
3076
3077 public final boolean removeAll(Collection<?> c) {
3078 boolean modified = false;
3079 for (Iterator<?> it = iterator(); it.hasNext();) {
3080 if (c.contains(it.next())) {
3081 it.remove();
3082 modified = true;
3083 }
3084 }
3085 return modified;
3086 }
3087
3088 public final boolean retainAll(Collection<?> c) {
3089 boolean modified = false;
3090 for (Iterator<?> it = iterator(); it.hasNext();) {
3091 if (!c.contains(it.next())) {
3092 it.remove();
3093 modified = true;
3094 }
3095 }
3096 return modified;
3097 }
3098
3099 }
3100
3101 static final class KeySet<K,V> extends MapView<K,V> implements Set<K> {
3102 KeySet(ConcurrentHashMapV8<K, V> map) { super(map); }
3103 public final boolean contains(Object o) { return map.containsKey(o); }
3104 public final boolean remove(Object o) { return map.remove(o) != null; }
3105 public final Iterator<K> iterator() {
3106 return new KeyIterator<K,V>(map);
3107 }
3108 public final boolean add(K e) {
3109 throw new UnsupportedOperationException();
3110 }
3111 public final boolean addAll(Collection<? extends K> c) {
3112 throw new UnsupportedOperationException();
3113 }
3114 public boolean equals(Object o) {
3115 Set<?> c;
3116 return ((o instanceof Set) &&
3117 ((c = (Set<?>)o) == this ||
3118 (containsAll(c) && c.containsAll(this))));
3119 }
3120 }
3121
3122 static final class Values<K,V> extends MapView<K,V>
3123 implements Collection<V> {
3124 Values(ConcurrentHashMapV8<K, V> map) { super(map); }
3125 public final boolean contains(Object o) { return map.containsValue(o); }
3126 public final boolean remove(Object o) {
3127 if (o != null) {
3128 Iterator<V> it = new ValueIterator<K,V>(map);
3129 while (it.hasNext()) {
3130 if (o.equals(it.next())) {
3131 it.remove();
3132 return true;
3133 }
3134 }
3135 }
3136 return false;
3137 }
3138 public final Iterator<V> iterator() {
3139 return new ValueIterator<K,V>(map);
3140 }
3141 public final boolean add(V e) {
3142 throw new UnsupportedOperationException();
3143 }
3144 public final boolean addAll(Collection<? extends V> c) {
3145 throw new UnsupportedOperationException();
3146 }
3147 }
3148
3149 static final class EntrySet<K,V> extends MapView<K,V>
3150 implements Set<Map.Entry<K,V>> {
3151 EntrySet(ConcurrentHashMapV8<K, V> map) { super(map); }
3152 public final boolean contains(Object o) {
3153 Object k, v, r; Map.Entry<?,?> e;
3154 return ((o instanceof Map.Entry) &&
3155 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3156 (r = map.get(k)) != null &&
3157 (v = e.getValue()) != null &&
3158 (v == r || v.equals(r)));
3159 }
3160 public final boolean remove(Object o) {
3161 Object k, v; Map.Entry<?,?> e;
3162 return ((o instanceof Map.Entry) &&
3163 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3164 (v = e.getValue()) != null &&
3165 map.remove(k, v));
3166 }
3167 public final Iterator<Map.Entry<K,V>> iterator() {
3168 return new EntryIterator<K,V>(map);
3169 }
3170 public final boolean add(Entry<K,V> e) {
3171 throw new UnsupportedOperationException();
3172 }
3173 public final boolean addAll(Collection<? extends Entry<K,V>> c) {
3174 throw new UnsupportedOperationException();
3175 }
3176 public boolean equals(Object o) {
3177 Set<?> c;
3178 return ((o instanceof Set) &&
3179 ((c = (Set<?>)o) == this ||
3180 (containsAll(c) && c.containsAll(this))));
3181 }
3182 }
3183
3184 /* ---------------- Serialization Support -------------- */
3185
3186 /**
3187      * Stripped-down version of the helper class used in the previous version,
3188      * declared for the sake of serialization compatibility
3189 */
3190 static class Segment<K,V> implements Serializable {
3191 private static final long serialVersionUID = 2249069246763182397L;
3192 final float loadFactor;
3193 Segment(float lf) { this.loadFactor = lf; }
3194 }
3195
3196 /**
3197 * Saves the state of the {@code ConcurrentHashMapV8} instance to a
3198 * stream (i.e., serializes it).
3199 * @param s the stream
3200 * @serialData
3201 * the key (Object) and value (Object)
3202 * for each key-value mapping, followed by a null pair.
3203 * The key-value mappings are emitted in no particular order.
3204 */
3205 @SuppressWarnings("unchecked")
3206 private void writeObject(java.io.ObjectOutputStream s)
3207 throws java.io.IOException {
3208 if (segments == null) { // for serialization compatibility
3209 segments = (Segment<K,V>[])
3210 new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
3211 for (int i = 0; i < segments.length; ++i)
3212 segments[i] = new Segment<K,V>(LOAD_FACTOR);
3213 }
3214 s.defaultWriteObject();
3215 InternalIterator<K,V> it = new InternalIterator<K,V>(this);
3216 Object v;
3217 while ((v = it.advance()) != null) {
3218 s.writeObject(it.nextKey);
3219 s.writeObject(v);
3220 }
3221 s.writeObject(null);
3222 s.writeObject(null);
3223 segments = null; // throw away
3224 }
3225
3226 /**
3227 * Reconstitutes the instance from a stream (that is, deserializes it).
3228 * @param s the stream
3229 */
3230 @SuppressWarnings("unchecked")
3231 private void readObject(java.io.ObjectInputStream s)
3232 throws java.io.IOException, ClassNotFoundException {
3233 s.defaultReadObject();
3234 this.segments = null; // unneeded
3235 // initialize transient final field
3236 UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder());
3237
3238 // Create all nodes, then place in table once size is known
3239 long size = 0L;
3240 Node p = null;
3241 for (;;) {
3242 K k = (K) s.readObject();
3243 V v = (V) s.readObject();
3244 if (k != null && v != null) {
3245 int h = spread(k.hashCode());
3246 p = new Node(h, k, v, p);
3247 ++size;
3248 }
3249 else
3250 break;
3251 }
3252 if (p != null) {
3253 boolean init = false;
3254 int n;
3255 if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
3256 n = MAXIMUM_CAPACITY;
3257 else {
3258 int sz = (int)size;
3259 n = tableSizeFor(sz + (sz >>> 1) + 1);
3260 }
3261 int sc = sizeCtl;
3262 boolean collide = false;
3263 if (n > sc &&
3264 UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
3265 try {
3266 if (table == null) {
3267 init = true;
3268 Node[] tab = new Node[n];
3269 int mask = n - 1;
3270 while (p != null) {
3271 int j = p.hash & mask;
3272 Node next = p.next;
3273 Node q = p.next = tabAt(tab, j);
3274 setTabAt(tab, j, p);
3275 if (!collide && q != null && q.hash == p.hash)
3276 collide = true;
3277 p = next;
3278 }
3279 table = tab;
3280 counter.add(size);
3281 sc = n - (n >>> 2);
3282 }
3283 } finally {
3284 sizeCtl = sc;
3285 }
3286 if (collide) { // rescan and convert to TreeBins
3287 Node[] tab = table;
3288 for (int i = 0; i < tab.length; ++i) {
3289 int c = 0;
3290 for (Node e = tabAt(tab, i); e != null; e = e.next) {
3291 if (++c > TREE_THRESHOLD &&
3292 (e.key instanceof Comparable)) {
3293 replaceWithTreeBin(tab, i, e.key);
3294 break;
3295 }
3296 }
3297 }
3298 }
3299 }
3300 if (!init) { // Can only happen if unsafely published.
3301 while (p != null) {
3302 internalPut(p.key, p.val);
3303 p = p.next;
3304 }
3305 }
3306 }
3307 }
3308
3309 // Unsafe mechanics
3310 private static final sun.misc.Unsafe UNSAFE;
3311 private static final long counterOffset;
3312 private static final long sizeCtlOffset;
3313 private static final long ABASE;
3314 private static final int ASHIFT;
3315
3316 static {
3317 int ss;
3318 try {
3319 UNSAFE = getUnsafe();
3320 Class<?> k = ConcurrentHashMapV8.class;
3321 counterOffset = UNSAFE.objectFieldOffset
3322 (k.getDeclaredField("counter"));
3323 sizeCtlOffset = UNSAFE.objectFieldOffset
3324 (k.getDeclaredField("sizeCtl"));
3325 Class<?> sc = Node[].class;
3326 ABASE = UNSAFE.arrayBaseOffset(sc);
3327 ss = UNSAFE.arrayIndexScale(sc);
3328 } catch (Exception e) {
3329 throw new Error(e);
3330 }
3331 if ((ss & (ss-1)) != 0)
3332 throw new Error("data type scale not a power of two");
3333 ASHIFT = 31 - Integer.numberOfLeadingZeros(ss);
3334 }
3335
3336 /**
3337 * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
3338 * Replace with a simple call to Unsafe.getUnsafe when integrating
3339 * into a jdk.
3340 *
3341 * @return a sun.misc.Unsafe
3342 */
3343 private static sun.misc.Unsafe getUnsafe() {
3344 try {
3345 return sun.misc.Unsafe.getUnsafe();
3346 } catch (SecurityException se) {
3347 try {
3348 return java.security.AccessController.doPrivileged
3349 (new java.security
3350 .PrivilegedExceptionAction<sun.misc.Unsafe>() {
3351 public sun.misc.Unsafe run() throws Exception {
3352 java.lang.reflect.Field f = sun.misc
3353 .Unsafe.class.getDeclaredField("theUnsafe");
3354 f.setAccessible(true);
3355 return (sun.misc.Unsafe) f.get(null);
3356 }});
3357 } catch (java.security.PrivilegedActionException e) {
3358 throw new RuntimeException("Could not initialize intrinsics",
3359 e.getCause());
3360 }
3361 }
3362 }
3363
3364 }