root/jsr166/jsr166/src/jdk7/java/util/concurrent/ConcurrentHashMap.java
Revision: 1.21
Committed: Tue Jun 18 17:46:16 2013 UTC (10 years, 11 months ago) by jsr166
Branch: MAIN
Changes since 1.20: +4 -5 lines
Log Message:
whitespace

File Contents

1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/publicdomain/zero/1.0/
5 */
6
7 package java.util.concurrent;
8
9 import java.io.ObjectStreamField;
10 import java.io.Serializable;
11 import java.lang.reflect.ParameterizedType;
12 import java.lang.reflect.Type;
13 import java.util.Arrays;
14 import java.util.Collection;
15 import java.util.Comparator;
16 import java.util.ConcurrentModificationException;
17 import java.util.Enumeration;
18 import java.util.HashMap;
19 import java.util.Hashtable;
20 import java.util.Iterator;
21 import java.util.Map;
22 import java.util.NoSuchElementException;
23 import java.util.Set;
24 import java.util.concurrent.ConcurrentMap;
25 import java.util.concurrent.ForkJoinPool;
26 import java.util.concurrent.atomic.AtomicInteger;
27 import java.util.concurrent.locks.LockSupport;
28 import java.util.concurrent.locks.ReentrantLock;
29
30 /**
31 * A hash table supporting full concurrency of retrievals and
32 * high expected concurrency for updates. This class obeys the
33 * same functional specification as {@link java.util.Hashtable}, and
34 * includes versions of methods corresponding to each method of
35 * {@code Hashtable}. However, even though all operations are
36 * thread-safe, retrieval operations do <em>not</em> entail locking,
37 * and there is <em>not</em> any support for locking the entire table
38 * in a way that prevents all access. This class is fully
39 * interoperable with {@code Hashtable} in programs that rely on its
40 * thread safety but not on its synchronization details.
41 *
42 * <p>Retrieval operations (including {@code get}) generally do not
43 * block, so may overlap with update operations (including {@code put}
44 * and {@code remove}). Retrievals reflect the results of the most
45 * recently <em>completed</em> update operations holding upon their
46 * onset. (More formally, an update operation for a given key bears a
47 * <em>happens-before</em> relation with any (non-null) retrieval for
48 * that key reporting the updated value.) For aggregate operations
49 * such as {@code putAll} and {@code clear}, concurrent retrievals may
50 * reflect insertion or removal of only some entries. Similarly,
51 * Iterators and Enumerations return elements reflecting the state of
52 * the hash table at some point at or since the creation of the
53 * iterator/enumeration. They do <em>not</em> throw {@link
54 * ConcurrentModificationException}. However, iterators are designed
55 * to be used by only one thread at a time. Bear in mind that the
56 * results of aggregate status methods including {@code size}, {@code
57 * isEmpty}, and {@code containsValue} are typically useful only when
58 * a map is not undergoing concurrent updates in other threads.
59 * Otherwise the results of these methods reflect transient states
60 * that may be adequate for monitoring or estimation purposes, but not
61 * for program control.
62 *
63 * <p>The table is dynamically expanded when there are too many
64 * collisions (i.e., keys that have distinct hash codes but fall into
65 * the same slot modulo the table size), with the expected average
66 * effect of maintaining roughly two bins per mapping (corresponding
67 * to a 0.75 load factor threshold for resizing). There may be much
68 * variance around this average as mappings are added and removed, but
69 * overall, this maintains a commonly accepted time/space tradeoff for
70 * hash tables. However, resizing this or any other kind of hash
71 * table may be a relatively slow operation. When possible, it is a
72 * good idea to provide a size estimate as an optional {@code
73 * initialCapacity} constructor argument. An additional optional
74 * {@code loadFactor} constructor argument provides a further means of
75 * customizing initial table capacity by specifying the table density
76 * to be used in calculating the amount of space to allocate for the
77 * given number of elements. Also, for compatibility with previous
78 * versions of this class, constructors may optionally specify an
79 * expected {@code concurrencyLevel} as an additional hint for
80 * internal sizing. Note that using many keys with exactly the same
81 * {@code hashCode()} is a sure way to slow down performance of any
82 * hash table. To ameliorate impact, when keys are {@link Comparable},
83 * this class may use comparison order among keys to help break ties.
84 *
85 * <p>A {@link Set} projection of a ConcurrentHashMap may be created
86 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
87 * (using {@link #keySet(Object)}) when only keys are of interest, and the
88 * mapped values are (perhaps transiently) not used or all take the
89 * same mapping value.
90 *
91 * <p>This class and its views and iterators implement all of the
92 * <em>optional</em> methods of the {@link Map} and {@link Iterator}
93 * interfaces.
94 *
95 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
96 * does <em>not</em> allow {@code null} to be used as a key or value.
97 *
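 * <p>A brief usage sketch; all of the calls below are thread-safe and may
 * be issued concurrently from multiple threads without external
 * synchronization:
 * <pre> {@code
 * ConcurrentHashMap<String, Long> counts = new ConcurrentHashMap<String, Long>();
 * counts.putIfAbsent("requests", 0L);       // insert only if no mapping exists
 * Long n = counts.get("requests");          // retrieval does not block
 * counts.replace("requests", n, n + 1);     // succeeds only if still mapped to n
 * counts.remove("requests", 0L);            // removes only if still mapped to 0L
 * }</pre>
 *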
98 * <p>This class is a member of the
99 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
100 * Java Collections Framework</a>.
101 *
102 * @since 1.5
103 * @author Doug Lea
104 * @param <K> the type of keys maintained by this map
105 * @param <V> the type of mapped values
106 */
107 public class ConcurrentHashMap<K,V> implements ConcurrentMap<K,V>, Serializable {
108 private static final long serialVersionUID = 7249069246763182397L;
109
110 /*
111 * Overview:
112 *
113 * The primary design goal of this hash table is to maintain
114 * concurrent readability (typically method get(), but also
115 * iterators and related methods) while minimizing update
116 * contention. Secondary goals are to keep space consumption about
117 * the same or better than java.util.HashMap, and to support high
118 * initial insertion rates on an empty table by many threads.
119 *
120 * This map usually acts as a binned (bucketed) hash table. Each
121 * key-value mapping is held in a Node. Most nodes are instances
122 * of the basic Node class with hash, key, value, and next
123 * fields. However, various subclasses exist: TreeNodes are
124 * arranged in balanced trees, not lists. TreeBins hold the roots
125 * of sets of TreeNodes. ForwardingNodes are placed at the heads
126 * of bins during resizing. ReservationNodes are used as
127 * placeholders while establishing values in computeIfAbsent and
128 * related methods. The types TreeBin, ForwardingNode, and
129 * ReservationNode do not hold normal user keys, values, or
130 * hashes, and are readily distinguishable during search etc
131 * because they have negative hash fields and null key and value
132 * fields. (These special nodes are either uncommon or transient,
133 * so the impact of carrying around some unused fields is
134 * insignificant.)
135 *
136 * The table is lazily initialized to a power-of-two size upon the
137 * first insertion. Each bin in the table normally contains a
138 * list of Nodes (most often, the list has only zero or one Node).
139 * Table accesses require volatile/atomic reads, writes, and
140 * CASes. Because there is no other way to arrange this without
141 * adding further indirections, we use intrinsics
142 * (sun.misc.Unsafe) operations.
143 *
144 * We use the top (sign) bit of Node hash fields for control
145 * purposes -- it is available anyway because of addressing
146 * constraints. Nodes with negative hash fields are specially
147 * handled or ignored in map methods.
148 *
149 * Insertion (via put or its variants) of the first node in an
150 * empty bin is performed by just CASing it to the bin. This is
151 * by far the most common case for put operations under most
152 * key/hash distributions. Other update operations (insert,
153 * delete, and replace) require locks. We do not want to waste
154 * the space required to associate a distinct lock object with
155 * each bin, so instead use the first node of a bin list itself as
156 * a lock. Locking support for these locks relies on builtin
157 * "synchronized" monitors.
158 *
159 * Using the first node of a list as a lock does not by itself
160 * suffice though: When a node is locked, any update must first
161 * validate that it is still the first node after locking it, and
162 * retry if not. Because new nodes are always appended to lists,
163 * once a node is first in a bin, it remains first until deleted
164 * or the bin becomes invalidated (upon resizing).
165 *
166 * The main disadvantage of per-bin locks is that other update
167 * operations on other nodes in a bin list protected by the same
168 * lock can stall, for example when user equals() or mapping
169 * functions take a long time. However, statistically, under
170 * random hash codes, this is not a common problem. Ideally, the
171 * frequency of nodes in bins follows a Poisson distribution
172 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
173 * parameter of about 0.5 on average, given the resizing threshold
174 * of 0.75, although with a large variance because of resizing
175 * granularity. Ignoring variance, the expected occurrences of
176 * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
177 * first values are:
178 *
179 * 0: 0.60653066
180 * 1: 0.30326533
181 * 2: 0.07581633
182 * 3: 0.01263606
183 * 4: 0.00157952
184 * 5: 0.00015795
185 * 6: 0.00001316
186 * 7: 0.00000094
187 * 8: 0.00000006
188 * more: less than 1 in ten million
189 *
190 * Lock contention probability for two threads accessing distinct
191 * elements is roughly 1 / (8 * #elements) under random hashes.
192 *
193 * Actual hash code distributions encountered in practice
194 * sometimes deviate significantly from uniform randomness. This
195 * includes the case when N > (1<<30), so some keys MUST collide.
196 * Similarly for dumb or hostile usages in which multiple keys are
197 * designed to have identical hash codes or ones that differ only
198 * in masked-out high bits. So we use a secondary strategy that
199 * applies when the number of nodes in a bin exceeds a
200 * threshold. These TreeBins use a balanced tree to hold nodes (a
201 * specialized form of red-black trees), bounding search time to
202 * O(log N). Each search step in a TreeBin is at least twice as
203 * slow as in a regular list, but given that N cannot exceed
204 * (1<<64) (before running out of addresses) this bounds search
205 * steps, lock hold times, etc, to reasonable constants (roughly
206 * 100 nodes inspected per operation worst case) so long as keys
207 * are Comparable (which is very common -- String, Long, etc).
208 * TreeBin nodes (TreeNodes) also maintain the same "next"
209 * traversal pointers as regular nodes, so can be traversed in
210 * iterators in the same way.
211 *
212 * The table is resized when occupancy exceeds a percentage
213 * threshold (nominally, 0.75, but see below). Any thread
214 * noticing an overfull bin may assist in resizing after the
215 * initiating thread allocates and sets up the replacement
216 * array. However, rather than stalling, these other threads may
217 * proceed with insertions etc. The use of TreeBins shields us
218 * from the worst case effects of overfilling while resizes are in
219 * progress. Resizing proceeds by transferring bins, one by one,
220 * from the table to the next table. To enable concurrency, the
221 * next table must be (incrementally) prefilled with place-holders
222 * serving as reverse forwarders to the old table. Because we are
223 * using power-of-two expansion, the elements from each bin must
224 * either stay at same index, or move with a power of two
225 * offset. We eliminate unnecessary node creation by catching
226 * cases where old nodes can be reused because their next fields
227 * won't change. On average, only about one-sixth of them need
228 * cloning when a table doubles. The nodes they replace will be
229 * garbage collectable as soon as they are no longer referenced by
230 * any reader thread that may be in the midst of concurrently
231 * traversing the table. Upon transfer, the old table bin contains
232 * only a special forwarding node (with hash field "MOVED") that
233 * contains the next table as its key. On encountering a
234 * forwarding node, access and update operations restart, using
235 * the new table.
236 *
237 * Each bin transfer requires its bin lock, which can stall
238 * waiting for locks while resizing. However, because other
239 * threads can join in and help resize rather than contend for
240 * locks, average aggregate waits become shorter as resizing
241 * progresses. The transfer operation must also ensure that all
242 * accessible bins in both the old and new table are usable by any
243 * traversal. This is arranged by proceeding from the last bin
244 * (table.length - 1) up towards the first. Upon seeing a
245 * forwarding node, traversals (see class Traverser) arrange to
246 * move to the new table without revisiting nodes. However, to
247 * ensure that no intervening nodes are skipped, bin splitting can
248 * only begin after the associated reverse-forwarders are in
249 * place.
250 *
251 * The traversal scheme also applies to partial traversals of
252 * ranges of bins (via an alternate Traverser constructor)
253 * to support partitioned aggregate operations. Also, read-only
254 * operations give up if ever forwarded to a null table, which
255 * provides support for shutdown-style clearing, which is also not
256 * currently implemented.
257 *
258 * Lazy table initialization minimizes footprint until first use,
259 * and also avoids resizings when the first operation is from a
260 * putAll, constructor with map argument, or deserialization.
261 * These cases attempt to override the initial capacity settings,
262 * but harmlessly fail to take effect in cases of races.
263 *
264 * The element count is maintained using a specialization of
265 * LongAdder. We need to incorporate a specialization rather than
266 * just use a LongAdder in order to access implicit
267 * contention-sensing that leads to creation of multiple
268 * CounterCells. The counter mechanics avoid contention on
269 * updates but can encounter cache thrashing if read too
270 * frequently during concurrent access. To avoid reading so often,
271 * resizing under contention is attempted only upon adding to a
272 * bin already holding two or more nodes. Under uniform hash
273 * distributions, the probability of this occurring at threshold
274 * is around 13%, meaning that only about 1 in 8 puts check
275 * threshold (and after resizing, many fewer do so).
276 *
277 * TreeBins use a special form of comparison for search and
278 * related operations (which is the main reason we cannot use
279 * existing collections such as TreeMaps). TreeBins contain
280 * Comparable elements, but may contain others, as well as
281 * elements that are Comparable but not necessarily Comparable
282 * for the same T, so we cannot invoke compareTo among them. To
283 * handle this, the tree is ordered primarily by hash value, then
284 * by Comparable.compareTo order if applicable. On lookup at a
285 * node, if elements are not comparable or compare as 0 then both
286 * left and right children may need to be searched in the case of
287 * tied hash values. (This corresponds to the full list search
288 * that would be necessary if all elements were non-Comparable and
289 * had tied hashes.) The red-black balancing code is updated from
290 * pre-jdk-collections
291 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
292 * based in turn on Cormen, Leiserson, and Rivest "Introduction to
293 * Algorithms" (CLR).
294 *
295 * TreeBins also require an additional locking mechanism. While
296 * list traversal is always possible by readers even during
297 * updates, tree traversal is not, mainly because of tree-rotations
298 * that may change the root node and/or its linkages. TreeBins
299 * include a simple read-write lock mechanism parasitic on the
300 * main bin-synchronization strategy: Structural adjustments
301 * associated with an insertion or removal are already bin-locked
302 * (and so cannot conflict with other writers) but must wait for
303 * ongoing readers to finish. Since there can be only one such
304 * waiter, we use a simple scheme using a single "waiter" field to
305 * block writers. However, readers need never block. If the root
306 * lock is held, they proceed along the slow traversal path (via
307 * next-pointers) until the lock becomes available or the list is
308 * exhausted, whichever comes first. These cases are not fast, but
309 * maximize aggregate expected throughput.
310 *
311 * Maintaining API and serialization compatibility with previous
312 * versions of this class introduces several oddities. Mainly: We
313 * leave untouched but unused constructor arguments referring to
314 * concurrencyLevel. We accept a loadFactor constructor argument,
315 * but apply it only to initial table capacity (which is the only
316 * time that we can guarantee to honor it.) We also declare an
317 * unused "Segment" class that is instantiated in minimal form
318 * only when serializing.
319 *
320 * This file is organized to make things a little easier to follow
321 * while reading than they might otherwise: First the main static
322 * declarations and utilities, then fields, then main public
323 * methods (with a few factorings of multiple public methods into
324 * internal ones), then sizing methods, trees, traversers, and
325 * bulk operations.
326 */
327
328 /* ---------------- Constants -------------- */
329
330 /**
331 * The largest possible table capacity. This value must be
332 * exactly 1<<30 to stay within Java array allocation and indexing
333 * bounds for power of two table sizes, and is further required
334 * because the top two bits of 32bit hash fields are used for
335 * control purposes.
336 */
337 private static final int MAXIMUM_CAPACITY = 1 << 30;
338
339 /**
340 * The default initial table capacity. Must be a power of 2
341 * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
342 */
343 private static final int DEFAULT_CAPACITY = 16;
344
345 /**
346 * The largest possible (non-power of two) array size.
347 * Needed by toArray and related methods.
348 */
349 static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
350
351 /**
352 * The default concurrency level for this table. Unused but
353 * defined for compatibility with previous versions of this class.
354 */
355 private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
356
357 /**
358 * The load factor for this table. Overrides of this value in
359 * constructors affect only the initial table capacity. The
360 * actual floating point value isn't normally used -- it is
361 * simpler to use expressions such as {@code n - (n >>> 2)} for
362 * the associated resizing threshold.
363 */
364 private static final float LOAD_FACTOR = 0.75f;
365
366 /**
367 * The bin count threshold for using a tree rather than list for a
368 * bin. Bins are converted to trees when adding an element to a
369 * bin with at least this many nodes. The value must be greater
370 * than 2, and should be at least 8 to mesh with assumptions in
371 * tree removal about conversion back to plain bins upon
372 * shrinkage.
373 */
374 static final int TREEIFY_THRESHOLD = 8;
375
376 /**
377 * The bin count threshold for untreeifying a (split) bin during a
378 * resize operation. Should be less than TREEIFY_THRESHOLD, and at
379 * most 6 to mesh with shrinkage detection under removal.
380 */
381 static final int UNTREEIFY_THRESHOLD = 6;
382
383 /**
384 * The smallest table capacity for which bins may be treeified.
385 * (Otherwise the table is resized if too many nodes in a bin.)
386 * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
387 * conflicts between resizing and treeification thresholds.
388 */
389 static final int MIN_TREEIFY_CAPACITY = 64;
390
391 /**
392 * Minimum number of rebinnings per transfer step. Ranges are
393 * subdivided to allow multiple resizer threads. This value
394 * serves as a lower bound to avoid resizers encountering
395 * excessive memory contention. The value should be at least
396 * DEFAULT_CAPACITY.
397 */
398 private static final int MIN_TRANSFER_STRIDE = 16;
399
400 /*
401 * Encodings for Node hash fields. See above for explanation.
402 */
403 static final int MOVED = 0x8fffffff; // (-1) hash for forwarding nodes
404 static final int TREEBIN = 0x80000000; // hash for roots of trees
405 static final int RESERVED = 0x80000001; // hash for transient reservations
406 static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
407
408 /** Number of CPUS, to place bounds on some sizings */
409 static final int NCPU = Runtime.getRuntime().availableProcessors();
410
411 /** For serialization compatibility. */
412 private static final ObjectStreamField[] serialPersistentFields = {
413 new ObjectStreamField("segments", Segment[].class),
414 new ObjectStreamField("segmentMask", Integer.TYPE),
415 new ObjectStreamField("segmentShift", Integer.TYPE)
416 };
417
418 /* ---------------- Nodes -------------- */
419
420 /**
421 * Key-value entry. This class is never exported out as a
422 * user-mutable Map.Entry (i.e., one supporting setValue; see
423 * MapEntry below), but can be used for read-only traversals used
424 * in bulk tasks. Subclasses of Node with a negative hash field
425 * are special, and contain null keys and values (but are never
426 * exported). Otherwise, keys and vals are never null.
427 */
428 static class Node<K,V> implements Map.Entry<K,V> {
429 final int hash;
430 final K key;
431 volatile V val;
432 Node<K,V> next;
433
434 Node(int hash, K key, V val, Node<K,V> next) {
435 this.hash = hash;
436 this.key = key;
437 this.val = val;
438 this.next = next;
439 }
440
441 public final K getKey() { return key; }
442 public final V getValue() { return val; }
443 public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
444 public final String toString(){ return key + "=" + val; }
445 public final V setValue(V value) {
446 throw new UnsupportedOperationException();
447 }
448
449 public final boolean equals(Object o) {
450 Object k, v, u; Map.Entry<?,?> e;
451 return ((o instanceof Map.Entry) &&
452 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
453 (v = e.getValue()) != null &&
454 (k == key || k.equals(key)) &&
455 (v == (u = val) || v.equals(u)));
456 }
457
458 /**
459 * Virtualized support for map.get(); overridden in subclasses.
460 */
461 Node<K,V> find(int h, Object k) {
462 Node<K,V> e = this;
463 if (k != null) {
464 do {
465 K ek;
466 if (e.hash == h &&
467 ((ek = e.key) == k || (ek != null && k.equals(ek))))
468 return e;
469 } while ((e = e.next) != null);
470 }
471 return null;
472 }
473 }
474
475 /* ---------------- Static utilities -------------- */
476
477 /**
478 * Spreads (XORs) higher bits of hash to lower and also forces top
479 * bit to 0. Because the table uses power-of-two masking, sets of
480 * hashes that vary only in bits above the current mask will
481 * always collide. (Among known examples are sets of Float keys
482 * holding consecutive whole numbers in small tables.) So we
483 * apply a transform that spreads the impact of higher bits
484 * downward. There is a tradeoff between speed, utility, and
485 * quality of bit-spreading. Because many common sets of hashes
486 * are already reasonably distributed (so don't benefit from
487 * spreading), and because we use trees to handle large sets of
488 * collisions in bins, we just XOR some shifted bits in the
489 * cheapest possible way to reduce systematic lossage, as well as
490 * to incorporate impact of the highest bits that would otherwise
491 * never be used in index calculations because of table bounds.
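 *
 * <p>For example, in a table of length 16 (index mask 15) the raw hash
 * codes 0x10000 and 0x20000 both map to bin 0. After spreading,
 * (0x10000 ^ 0x1) & 15 == 1 and (0x20000 ^ 0x2) & 15 == 2, so the two
 * keys land in distinct bins.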
492 */
493 static final int spread(int h) {
494 return (h ^ (h >>> 16)) & HASH_BITS;
495 }
496
497 /**
498 * Returns a power of two table size for the given desired capacity.
499 * See Hacker's Delight, sec 3.2
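 * For example, tableSizeFor(17) smears the bits of 16 down to 31 and
 * returns 32, tableSizeFor(16) returns 16, and tableSizeFor(1) returns 1.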
500 */
501 private static final int tableSizeFor(int c) {
502 int n = c - 1;
503 n |= n >>> 1;
504 n |= n >>> 2;
505 n |= n >>> 4;
506 n |= n >>> 8;
507 n |= n >>> 16;
508 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
509 }
510
511 /**
512 * Returns x's Class if it is of the form "class C implements
513 * Comparable<C>", else null.
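 * For example, String.class is returned for a String argument, while
 * null is returned for an object whose class implements only the raw
 * {@code Comparable} type.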
514 */
515 static Class<?> comparableClassFor(Object x) {
516 if (x instanceof Comparable) {
517 Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
518 if ((c = x.getClass()) == String.class) // bypass checks
519 return c;
520 if ((ts = c.getGenericInterfaces()) != null) {
521 for (int i = 0; i < ts.length; ++i) {
522 if (((t = ts[i]) instanceof ParameterizedType) &&
523 ((p = (ParameterizedType)t).getRawType() ==
524 Comparable.class) &&
525 (as = p.getActualTypeArguments()) != null &&
526 as.length == 1 && as[0] == c) // type arg is c
527 return c;
528 }
529 }
530 }
531 return null;
532 }
533
534 /**
535 * Returns k.compareTo(x) if x matches kc (k's screened comparable
536 * class), else 0.
537 */
538 @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
539 static int compareComparables(Class<?> kc, Object k, Object x) {
540 return (x == null || x.getClass() != kc ? 0 :
541 ((Comparable)k).compareTo(x));
542 }
543
544 /* ---------------- Table element access -------------- */
545
546 /*
547 * Volatile access methods are used for table elements as well as
548 * elements of in-progress next table while resizing. All uses of
549 * the tab arguments must be null checked by callers. All callers
550 * also paranoically precheck that tab's length is not zero (or an
551 * equivalent check), thus ensuring that any index argument taking
552 * the form of a hash value anded with (length - 1) is a valid
553 * index. Note that, to be correct wrt arbitrary concurrency
554 * errors by users, these checks must operate on local variables,
555 * which accounts for some odd-looking inline assignments below.
556 * Note that calls to setTabAt always occur within locked regions,
557 * and so do not need full volatile semantics, but still require
558 * ordering to maintain concurrent readability.
559 */
560
561 @SuppressWarnings("unchecked")
562 static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
563 return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
564 }
565
566 static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
567 Node<K,V> c, Node<K,V> v) {
568 return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
569 }
570
571 static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
572 U.putOrderedObject(tab, ((long)i << ASHIFT) + ABASE, v);
573 }
574
575 /* ---------------- Fields -------------- */
576
577 /**
578 * The array of bins. Lazily initialized upon first insertion.
579 * Size is always a power of two. Accessed directly by iterators.
580 */
581 transient volatile Node<K,V>[] table;
582
583 /**
584 * The next table to use; non-null only while resizing.
585 */
586 private transient volatile Node<K,V>[] nextTable;
587
588 /**
589 * Base counter value, used mainly when there is no contention,
590 * but also as a fallback during table initialization
591 * races. Updated via CAS.
592 */
593 private transient volatile long baseCount;
594
595 /**
596 * Table initialization and resizing control. When negative, the
597 * table is being initialized or resized: -1 for initialization,
598 * else -(1 + the number of active resizing threads). Otherwise,
599 * when table is null, holds the initial table size to use upon
600 * creation, or 0 for default. After initialization, holds the
601 * next element count value upon which to resize the table.
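 *
 * <p>For example: -1 while a thread is running initTable(); -2 while a
 * single thread is transferring bins to a new table; 0 when the table is
 * null and the default capacity is to be used; and, once a table of
 * length 16 has been installed, 12 (= 16 - 16/4), the occupancy at which
 * the next resize is triggered.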
602 */
603 private transient volatile int sizeCtl;
604
605 /**
606 * The next table index (plus one) to split while resizing.
607 */
608 private transient volatile int transferIndex;
609
610 /**
611 * The least available table index to split while resizing.
612 */
613 private transient volatile int transferOrigin;
614
615 /**
616 * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
617 */
618 private transient volatile int cellsBusy;
619
620 /**
621 * Table of counter cells. When non-null, size is a power of 2.
622 */
623 private transient volatile CounterCell[] counterCells;
624
625 // views
626 private transient KeySetView<K,V> keySet;
627 private transient ValuesView<K,V> values;
628 private transient EntrySetView<K,V> entrySet;
629
630
631 /* ---------------- Public operations -------------- */
632
633 /**
634 * Creates a new, empty map with the default initial table size (16).
635 */
636 public ConcurrentHashMap() {
637 }
638
639 /**
640 * Creates a new, empty map with an initial table size
641 * accommodating the specified number of elements without the need
642 * to dynamically resize.
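 *
 * <p>For example, {@code new ConcurrentHashMap(22)} scales the estimate
 * to 22 + 22/2 + 1 = 34 and rounds up to the next power of two, so the
 * first insertion allocates a table of 64 bins.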
643 *
644 * @param initialCapacity The implementation performs internal
645 * sizing to accommodate this many elements.
646 * @throws IllegalArgumentException if the initial capacity of
647 * elements is negative
648 */
649 public ConcurrentHashMap(int initialCapacity) {
650 if (initialCapacity < 0)
651 throw new IllegalArgumentException();
652 int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
653 MAXIMUM_CAPACITY :
654 tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
655 this.sizeCtl = cap;
656 }
657
658 /**
659 * Creates a new map with the same mappings as the given map.
660 *
661 * @param m the map
662 */
663 public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
664 this.sizeCtl = DEFAULT_CAPACITY;
665 putAll(m);
666 }
667
668 /**
669 * Creates a new, empty map with an initial table size based on
670 * the given number of elements ({@code initialCapacity}) and
671 * initial table density ({@code loadFactor}).
672 *
673 * @param initialCapacity the initial capacity. The implementation
674 * performs internal sizing to accommodate this many elements,
675 * given the specified load factor.
676 * @param loadFactor the load factor (table density) for
677 * establishing the initial table size
678 * @throws IllegalArgumentException if the initial capacity of
679 * elements is negative or the load factor is nonpositive
680 *
681 * @since 1.6
682 */
683 public ConcurrentHashMap(int initialCapacity, float loadFactor) {
684 this(initialCapacity, loadFactor, 1);
685 }
686
687 /**
688 * Creates a new, empty map with an initial table size based on
689 * the given number of elements ({@code initialCapacity}), table
690 * density ({@code loadFactor}), and number of concurrently
691 * updating threads ({@code concurrencyLevel}).
692 *
693 * @param initialCapacity the initial capacity. The implementation
694 * performs internal sizing to accommodate this many elements,
695 * given the specified load factor.
696 * @param loadFactor the load factor (table density) for
697 * establishing the initial table size
698 * @param concurrencyLevel the estimated number of concurrently
699 * updating threads. The implementation may use this value as
700 * a sizing hint.
701 * @throws IllegalArgumentException if the initial capacity is
702 * negative or the load factor or concurrencyLevel are
703 * nonpositive
704 */
705 public ConcurrentHashMap(int initialCapacity,
706 float loadFactor, int concurrencyLevel) {
707 if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
708 throw new IllegalArgumentException();
709 if (initialCapacity < concurrencyLevel) // Use at least as many bins
710 initialCapacity = concurrencyLevel; // as estimated threads
711 long size = (long)(1.0 + (long)initialCapacity / loadFactor);
712 int cap = (size >= (long)MAXIMUM_CAPACITY) ?
713 MAXIMUM_CAPACITY : tableSizeFor((int)size);
714 this.sizeCtl = cap;
715 }
716
717 // Original (since JDK1.2) Map methods
718
719 /**
720 * {@inheritDoc}
721 */
722 public int size() {
723 long n = sumCount();
724 return ((n < 0L) ? 0 :
725 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
726 (int)n);
727 }
728
729 /**
730 * {@inheritDoc}
731 */
732 public boolean isEmpty() {
733 return sumCount() <= 0L; // ignore transient negative values
734 }
735
736 /**
737 * Returns the value to which the specified key is mapped,
738 * or {@code null} if this map contains no mapping for the key.
739 *
740 * <p>More formally, if this map contains a mapping from a key
741 * {@code k} to a value {@code v} such that {@code key.equals(k)},
742 * then this method returns {@code v}; otherwise it returns
743 * {@code null}. (There can be at most one such mapping.)
744 *
745 * @throws NullPointerException if the specified key is null
746 */
747 public V get(Object key) {
748 Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
749 int h = spread(key.hashCode());
750 if ((tab = table) != null && (n = tab.length) > 0 &&
751 (e = tabAt(tab, (n - 1) & h)) != null) {
752 if ((eh = e.hash) == h) {
753 if ((ek = e.key) == key || (ek != null && key.equals(ek)))
754 return e.val;
755 }
756 else if (eh < 0)
757 return (p = e.find(h, key)) != null ? p.val : null;
758 while ((e = e.next) != null) {
759 if (e.hash == h &&
760 ((ek = e.key) == key || (ek != null && key.equals(ek))))
761 return e.val;
762 }
763 }
764 return null;
765 }
766
767 /**
768 * Tests if the specified object is a key in this table.
769 *
770 * @param key possible key
771 * @return {@code true} if and only if the specified object
772 * is a key in this table, as determined by the
773 * {@code equals} method; {@code false} otherwise
774 * @throws NullPointerException if the specified key is null
775 */
776 public boolean containsKey(Object key) {
777 return get(key) != null;
778 }
779
780 /**
781 * Returns {@code true} if this map maps one or more keys to the
782 * specified value. Note: This method may require a full traversal
783 * of the map, and is much slower than method {@code containsKey}.
784 *
785 * @param value value whose presence in this map is to be tested
786 * @return {@code true} if this map maps one or more keys to the
787 * specified value
788 * @throws NullPointerException if the specified value is null
789 */
790 public boolean containsValue(Object value) {
791 if (value == null)
792 throw new NullPointerException();
793 Node<K,V>[] t;
794 if ((t = table) != null) {
795 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
796 for (Node<K,V> p; (p = it.advance()) != null; ) {
797 V v;
798 if ((v = p.val) == value || (v != null && value.equals(v)))
799 return true;
800 }
801 }
802 return false;
803 }
804
805 /**
806 * Maps the specified key to the specified value in this table.
807 * Neither the key nor the value can be null.
808 *
809 * <p>The value can be retrieved by calling the {@code get} method
810 * with a key that is equal to the original key.
811 *
812 * @param key key with which the specified value is to be associated
813 * @param value value to be associated with the specified key
814 * @return the previous value associated with {@code key}, or
815 * {@code null} if there was no mapping for {@code key}
816 * @throws NullPointerException if the specified key or value is null
817 */
818 public V put(K key, V value) {
819 return putVal(key, value, false);
820 }
821
822 /** Implementation for put and putIfAbsent */
823 final V putVal(K key, V value, boolean onlyIfAbsent) {
824 if (key == null || value == null) throw new NullPointerException();
825 int hash = spread(key.hashCode());
826 int binCount = 0;
827 for (Node<K,V>[] tab = table;;) {
828 Node<K,V> f; int n, i, fh;
829 if (tab == null || (n = tab.length) == 0)
830 tab = initTable();
831 else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
832 if (casTabAt(tab, i, null,
833 new Node<K,V>(hash, key, value, null)))
834 break; // no lock when adding to empty bin
835 }
836 else if ((fh = f.hash) == MOVED)
837 tab = helpTransfer(tab, f);
838 else {
839 V oldVal = null;
840 synchronized (f) {
841 if (tabAt(tab, i) == f) {
842 if (fh >= 0) {
843 binCount = 1;
844 for (Node<K,V> e = f;; ++binCount) {
845 K ek;
846 if (e.hash == hash &&
847 ((ek = e.key) == key ||
848 (ek != null && key.equals(ek)))) {
849 oldVal = e.val;
850 if (!onlyIfAbsent)
851 e.val = value;
852 break;
853 }
854 Node<K,V> pred = e;
855 if ((e = e.next) == null) {
856 pred.next = new Node<K,V>(hash, key,
857 value, null);
858 break;
859 }
860 }
861 }
862 else if (f instanceof TreeBin) {
863 Node<K,V> p;
864 binCount = 2;
865 if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
866 value)) != null) {
867 oldVal = p.val;
868 if (!onlyIfAbsent)
869 p.val = value;
870 }
871 }
872 }
873 }
874 if (binCount != 0) {
875 if (binCount >= TREEIFY_THRESHOLD)
876 treeifyBin(tab, i);
877 if (oldVal != null)
878 return oldVal;
879 break;
880 }
881 }
882 }
883 addCount(1L, binCount);
884 return null;
885 }
886
887 /**
888 * Copies all of the mappings from the specified map to this one.
889 * These mappings replace any mappings that this map had for any of the
890 * keys currently in the specified map.
891 *
892 * @param m mappings to be stored in this map
893 */
894 public void putAll(Map<? extends K, ? extends V> m) {
895 tryPresize(m.size());
896 for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
897 putVal(e.getKey(), e.getValue(), false);
898 }
899
900 /**
901 * Removes the key (and its corresponding value) from this map.
902 * This method does nothing if the key is not in the map.
903 *
904 * @param key the key that needs to be removed
905 * @return the previous value associated with {@code key}, or
906 * {@code null} if there was no mapping for {@code key}
907 * @throws NullPointerException if the specified key is null
908 */
909 public V remove(Object key) {
910 return replaceNode(key, null, null);
911 }
912
913 /**
914 * Implementation for the four public remove/replace methods:
915 * Replaces node value with v, conditional upon match of cv if
916 * non-null. If resulting value is null, delete.
917 */
918 final V replaceNode(Object key, V value, Object cv) {
919 int hash = spread(key.hashCode());
920 for (Node<K,V>[] tab = table;;) {
921 Node<K,V> f; int n, i, fh;
922 if (tab == null || (n = tab.length) == 0 ||
923 (f = tabAt(tab, i = (n - 1) & hash)) == null)
924 break;
925 else if ((fh = f.hash) == MOVED)
926 tab = helpTransfer(tab, f);
927 else {
928 V oldVal = null;
929 boolean validated = false;
930 synchronized (f) {
931 if (tabAt(tab, i) == f) {
932 if (fh >= 0) {
933 validated = true;
934 for (Node<K,V> e = f, pred = null;;) {
935 K ek;
936 if (e.hash == hash &&
937 ((ek = e.key) == key ||
938 (ek != null && key.equals(ek)))) {
939 V ev = e.val;
940 if (cv == null || cv == ev ||
941 (ev != null && cv.equals(ev))) {
942 oldVal = ev;
943 if (value != null)
944 e.val = value;
945 else if (pred != null)
946 pred.next = e.next;
947 else
948 setTabAt(tab, i, e.next);
949 }
950 break;
951 }
952 pred = e;
953 if ((e = e.next) == null)
954 break;
955 }
956 }
957 else if (f instanceof TreeBin) {
958 validated = true;
959 TreeBin<K,V> t = (TreeBin<K,V>)f;
960 TreeNode<K,V> r, p;
961 if ((r = t.root) != null &&
962 (p = r.findTreeNode(hash, key, null)) != null) {
963 V pv = p.val;
964 if (cv == null || cv == pv ||
965 (pv != null && cv.equals(pv))) {
966 oldVal = pv;
967 if (value != null)
968 p.val = value;
969 else if (t.removeTreeNode(p))
970 setTabAt(tab, i, untreeify(t.first));
971 }
972 }
973 }
974 }
975 }
976 if (validated) {
977 if (oldVal != null) {
978 if (value == null)
979 addCount(-1L, -1);
980 return oldVal;
981 }
982 break;
983 }
984 }
985 }
986 return null;
987 }
988
989 /**
990 * Removes all of the mappings from this map.
991 */
992 public void clear() {
993 long delta = 0L; // negative number of deletions
994 int i = 0;
995 Node<K,V>[] tab = table;
996 while (tab != null && i < tab.length) {
997 int fh;
998 Node<K,V> f = tabAt(tab, i);
999 if (f == null)
1000 ++i;
1001 else if ((fh = f.hash) == MOVED) {
1002 tab = helpTransfer(tab, f);
1003 i = 0; // restart
1004 }
1005 else {
1006 synchronized (f) {
1007 if (tabAt(tab, i) == f) {
1008 Node<K,V> p = (fh >= 0 ? f :
1009 (f instanceof TreeBin) ?
1010 ((TreeBin<K,V>)f).first : null);
1011 while (p != null) {
1012 --delta;
1013 p = p.next;
1014 }
1015 setTabAt(tab, i++, null);
1016 }
1017 }
1018 }
1019 }
1020 if (delta != 0L)
1021 addCount(delta, -1);
1022 }
1023
1024 /**
1025 * Returns a {@link Set} view of the keys contained in this map.
1026 * The set is backed by the map, so changes to the map are
1027 * reflected in the set, and vice-versa. The set supports element
1028 * removal, which removes the corresponding mapping from this map,
1029 * via the {@code Iterator.remove}, {@code Set.remove},
1030 * {@code removeAll}, {@code retainAll}, and {@code clear}
1031 * operations. It does not support the {@code add} or
1032 * {@code addAll} operations.
1033 *
1034 * <p>The view's {@code iterator} is a "weakly consistent" iterator
1035 * that will never throw {@link ConcurrentModificationException},
1036 * and guarantees to traverse elements as they existed upon
1037 * construction of the iterator, and may (but is not guaranteed to)
1038 * reflect any modifications subsequent to construction.
1039 *
1040 * @return the set view
1041 */
1042 public KeySetView<K,V> keySet() {
1043 KeySetView<K,V> ks;
1044 return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
1045 }
1046
1047 /**
1048 * Returns a {@link Collection} view of the values contained in this map.
1049 * The collection is backed by the map, so changes to the map are
1050 * reflected in the collection, and vice-versa. The collection
1051 * supports element removal, which removes the corresponding
1052 * mapping from this map, via the {@code Iterator.remove},
1053 * {@code Collection.remove}, {@code removeAll},
1054 * {@code retainAll}, and {@code clear} operations. It does not
1055 * support the {@code add} or {@code addAll} operations.
1056 *
1057 * <p>The view's {@code iterator} is a "weakly consistent" iterator
1058 * that will never throw {@link ConcurrentModificationException},
1059 * and guarantees to traverse elements as they existed upon
1060 * construction of the iterator, and may (but is not guaranteed to)
1061 * reflect any modifications subsequent to construction.
1062 *
1063 * @return the collection view
1064 */
1065 public Collection<V> values() {
1066 ValuesView<K,V> vs;
1067 return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
1068 }
1069
1070 /**
1071 * Returns a {@link Set} view of the mappings contained in this map.
1072 * The set is backed by the map, so changes to the map are
1073 * reflected in the set, and vice-versa. The set supports element
1074 * removal, which removes the corresponding mapping from the map,
1075 * via the {@code Iterator.remove}, {@code Set.remove},
1076 * {@code removeAll}, {@code retainAll}, and {@code clear}
1077 * operations.
1078 *
1079 * <p>The view's {@code iterator} is a "weakly consistent" iterator
1080 * that will never throw {@link ConcurrentModificationException},
1081 * and guarantees to traverse elements as they existed upon
1082 * construction of the iterator, and may (but is not guaranteed to)
1083 * reflect any modifications subsequent to construction.
1084 *
1085 * @return the set view
1086 */
1087 public Set<Map.Entry<K,V>> entrySet() {
1088 EntrySetView<K,V> es;
1089 return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
1090 }
1091
1092 /**
1093 * Returns the hash code value for this {@link Map}, i.e.,
1094 * the sum of, for each key-value pair in the map,
1095 * {@code key.hashCode() ^ value.hashCode()}.
1096 *
1097 * @return the hash code value for this map
1098 */
1099 public int hashCode() {
1100 int h = 0;
1101 Node<K,V>[] t;
1102 if ((t = table) != null) {
1103 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1104 for (Node<K,V> p; (p = it.advance()) != null; )
1105 h += p.key.hashCode() ^ p.val.hashCode();
1106 }
1107 return h;
1108 }
1109
1110 /**
1111 * Returns a string representation of this map. The string
1112 * representation consists of a list of key-value mappings (in no
1113 * particular order) enclosed in braces ("{@code {}}"). Adjacent
1114 * mappings are separated by the characters {@code ", "} (comma
1115 * and space). Each key-value mapping is rendered as the key
1116 * followed by an equals sign ("{@code =}") followed by the
1117 * associated value.
1118 *
1119 * @return a string representation of this map
1120 */
1121 public String toString() {
1122 Node<K,V>[] t;
1123 int f = (t = table) == null ? 0 : t.length;
1124 Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1125 StringBuilder sb = new StringBuilder();
1126 sb.append('{');
1127 Node<K,V> p;
1128 if ((p = it.advance()) != null) {
1129 for (;;) {
1130 K k = p.key;
1131 V v = p.val;
1132 sb.append(k == this ? "(this Map)" : k);
1133 sb.append('=');
1134 sb.append(v == this ? "(this Map)" : v);
1135 if ((p = it.advance()) == null)
1136 break;
1137 sb.append(',').append(' ');
1138 }
1139 }
1140 return sb.append('}').toString();
1141 }
1142
1143 /**
1144 * Compares the specified object with this map for equality.
1145 * Returns {@code true} if the given object is a map with the same
1146 * mappings as this map. This operation may return misleading
1147 * results if either map is concurrently modified during execution
1148 * of this method.
1149 *
1150 * @param o object to be compared for equality with this map
1151 * @return {@code true} if the specified object is equal to this map
1152 */
1153 public boolean equals(Object o) {
1154 if (o != this) {
1155 if (!(o instanceof Map))
1156 return false;
1157 Map<?,?> m = (Map<?,?>) o;
1158 Node<K,V>[] t;
1159 int f = (t = table) == null ? 0 : t.length;
1160 Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1161 for (Node<K,V> p; (p = it.advance()) != null; ) {
1162 V val = p.val;
1163 Object v = m.get(p.key);
1164 if (v == null || (v != val && !v.equals(val)))
1165 return false;
1166 }
1167 for (Map.Entry<?,?> e : m.entrySet()) {
1168 Object mk, mv, v;
1169 if ((mk = e.getKey()) == null ||
1170 (mv = e.getValue()) == null ||
1171 (v = get(mk)) == null ||
1172 (mv != v && !mv.equals(v)))
1173 return false;
1174 }
1175 }
1176 return true;
1177 }
1178
1179 /**
1180 * Stripped-down version of helper class used in previous version,
1181 * declared for the sake of serialization compatibility
1182 */
1183 static class Segment<K,V> extends ReentrantLock implements Serializable {
1184 private static final long serialVersionUID = 2249069246763182397L;
1185 final float loadFactor;
1186 Segment(float lf) { this.loadFactor = lf; }
1187 }
1188
1189 /**
1190 * Saves the state of the {@code ConcurrentHashMap} instance to a
1191 * stream (i.e., serializes it).
1192 * @param s the stream
1193 * @serialData
1194 * the key (Object) and value (Object)
1195 * for each key-value mapping, followed by a null pair.
1196 * The key-value mappings are emitted in no particular order.
1197 */
1198 private void writeObject(java.io.ObjectOutputStream s)
1199 throws java.io.IOException {
1200 // For serialization compatibility
1201 // Emulate segment calculation from previous version of this class
1202 int sshift = 0;
1203 int ssize = 1;
1204 while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
1205 ++sshift;
1206 ssize <<= 1;
1207 }
1208 int segmentShift = 32 - sshift;
1209 int segmentMask = ssize - 1;
1210 @SuppressWarnings("unchecked") Segment<K,V>[] segments = (Segment<K,V>[])
1211 new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
1212 for (int i = 0; i < segments.length; ++i)
1213 segments[i] = new Segment<K,V>(LOAD_FACTOR);
1214 s.putFields().put("segments", segments);
1215 s.putFields().put("segmentShift", segmentShift);
1216 s.putFields().put("segmentMask", segmentMask);
1217 s.writeFields();
1218
1219 Node<K,V>[] t;
1220 if ((t = table) != null) {
1221 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1222 for (Node<K,V> p; (p = it.advance()) != null; ) {
1223 s.writeObject(p.key);
1224 s.writeObject(p.val);
1225 }
1226 }
1227 s.writeObject(null);
1228 s.writeObject(null);
1229 segments = null; // throw away
1230 }
1231
1232 /**
1233 * Reconstitutes the instance from a stream (that is, deserializes it).
1234 * @param s the stream
1235 */
1236 private void readObject(java.io.ObjectInputStream s)
1237 throws java.io.IOException, ClassNotFoundException {
1238 /*
1239 * To improve performance in typical cases, we create nodes
1240 * while reading, then place in table once size is known.
1241 * However, we must also validate uniqueness and deal with
1242 * overpopulated bins while doing so, which requires
1243 * specialized versions of putVal mechanics.
1244 */
1245 sizeCtl = -1; // force exclusion for table construction
1246 s.defaultReadObject();
1247 long size = 0L;
1248 Node<K,V> p = null;
1249 for (;;) {
1250 @SuppressWarnings("unchecked") K k = (K) s.readObject();
1251 @SuppressWarnings("unchecked") V v = (V) s.readObject();
1252 if (k != null && v != null) {
1253 p = new Node<K,V>(spread(k.hashCode()), k, v, p);
1254 ++size;
1255 }
1256 else
1257 break;
1258 }
1259 if (size == 0L)
1260 sizeCtl = 0;
1261 else {
1262 int n;
1263 if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
1264 n = MAXIMUM_CAPACITY;
1265 else {
1266 int sz = (int)size;
1267 n = tableSizeFor(sz + (sz >>> 1) + 1);
1268 }
1269 @SuppressWarnings({"rawtypes","unchecked"})
1270 Node<K,V>[] tab = (Node<K,V>[])new Node[n];
1271 int mask = n - 1;
1272 long added = 0L;
1273 while (p != null) {
1274 boolean insertAtFront;
1275 Node<K,V> next = p.next, first;
1276 int h = p.hash, j = h & mask;
1277 if ((first = tabAt(tab, j)) == null)
1278 insertAtFront = true;
1279 else {
1280 K k = p.key;
1281 if (first.hash < 0) {
1282 TreeBin<K,V> t = (TreeBin<K,V>)first;
1283 if (t.putTreeVal(h, k, p.val) == null)
1284 ++added;
1285 insertAtFront = false;
1286 }
1287 else {
1288 int binCount = 0;
1289 insertAtFront = true;
1290 Node<K,V> q; K qk;
1291 for (q = first; q != null; q = q.next) {
1292 if (q.hash == h &&
1293 ((qk = q.key) == k ||
1294 (qk != null && k.equals(qk)))) {
1295 insertAtFront = false;
1296 break;
1297 }
1298 ++binCount;
1299 }
1300 if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
1301 insertAtFront = false;
1302 ++added;
1303 p.next = first;
1304 TreeNode<K,V> hd = null, tl = null;
1305 for (q = p; q != null; q = q.next) {
1306 TreeNode<K,V> t = new TreeNode<K,V>
1307 (q.hash, q.key, q.val, null, null);
1308 if ((t.prev = tl) == null)
1309 hd = t;
1310 else
1311 tl.next = t;
1312 tl = t;
1313 }
1314 setTabAt(tab, j, new TreeBin<K,V>(hd));
1315 }
1316 }
1317 }
1318 if (insertAtFront) {
1319 ++added;
1320 p.next = first;
1321 setTabAt(tab, j, p);
1322 }
1323 p = next;
1324 }
1325 table = tab;
1326 sizeCtl = n - (n >>> 2);
1327 baseCount = added;
1328 }
1329 }
1330
1331 // ConcurrentMap methods
1332
1333 /**
1334 * {@inheritDoc}
1335 *
1336 * @return the previous value associated with the specified key,
1337 * or {@code null} if there was no mapping for the key
1338 * @throws NullPointerException if the specified key or value is null
1339 */
1340 public V putIfAbsent(K key, V value) {
1341 return putVal(key, value, true);
1342 }
1343
1344 /**
1345 * {@inheritDoc}
1346 *
1347 * @throws NullPointerException if the specified key is null
1348 */
1349 public boolean remove(Object key, Object value) {
1350 if (key == null)
1351 throw new NullPointerException();
1352 return value != null && replaceNode(key, null, value) != null;
1353 }
1354
1355 /**
1356 * {@inheritDoc}
1357 *
1358 * @throws NullPointerException if any of the arguments are null
1359 */
1360 public boolean replace(K key, V oldValue, V newValue) {
1361 if (key == null || oldValue == null || newValue == null)
1362 throw new NullPointerException();
1363 return replaceNode(key, newValue, oldValue) != null;
1364 }
1365
1366 /**
1367 * {@inheritDoc}
1368 *
1369 * @return the previous value associated with the specified key,
1370 * or {@code null} if there was no mapping for the key
1371 * @throws NullPointerException if the specified key or value is null
1372 */
1373 public V replace(K key, V value) {
1374 if (key == null || value == null)
1375 throw new NullPointerException();
1376 return replaceNode(key, value, null);
1377 }
1378 // Hashtable legacy methods
1379
1380 /**
1381 * Legacy method testing if some key maps into the specified value
1382 * in this table. This method is identical in functionality to
1383 * {@link #containsValue(Object)}, and exists solely to ensure
1384 * full compatibility with class {@link java.util.Hashtable},
1385 * which supported this method prior to introduction of the
1386 * Java Collections framework.
1387 *
1388 * @param value a value to search for
1389 * @return {@code true} if and only if some key maps to the
1390 * {@code value} argument in this table as
1391 * determined by the {@code equals} method;
1392 * {@code false} otherwise
1393 * @throws NullPointerException if the specified value is null
1394 */
1395 @Deprecated public boolean contains(Object value) {
1396 return containsValue(value);
1397 }
1398
1399 /**
1400 * Returns an enumeration of the keys in this table.
1401 *
1402 * @return an enumeration of the keys in this table
1403 * @see #keySet()
1404 */
1405 public Enumeration<K> keys() {
1406 Node<K,V>[] t;
1407 int f = (t = table) == null ? 0 : t.length;
1408 return new KeyIterator<K,V>(t, f, 0, f, this);
1409 }
1410
1411 /**
1412 * Returns an enumeration of the values in this table.
1413 *
1414 * @return an enumeration of the values in this table
1415 * @see #values()
1416 */
1417 public Enumeration<V> elements() {
1418 Node<K,V>[] t;
1419 int f = (t = table) == null ? 0 : t.length;
1420 return new ValueIterator<K,V>(t, f, 0, f, this);
1421 }
1422
1423 // ConcurrentHashMap-only methods
1424
1425 /**
1426 * Returns the number of mappings. This method should be used
1427 * instead of {@link #size} because a ConcurrentHashMap may
1428 * contain more mappings than can be represented as an int. The
1429 * value returned is an estimate; the actual count may differ if
1430 * there are concurrent insertions or removals.
1431 *
1432 * @return the number of mappings
1433 * @since 1.8
1434 */
1435 public long mappingCount() {
1436 long n = sumCount();
1437 return (n < 0L) ? 0L : n; // ignore transient negative values
1438 }
1439
1440 /**
1441 * Creates a new {@link Set} backed by a ConcurrentHashMap
1442 * from the given type to {@code Boolean.TRUE}.
1443 *
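 * <p>For example:
 * <pre> {@code
 * Set<String> seen = ConcurrentHashMap.<String>newKeySet();
 * boolean firstVisit = seen.add("http://example.com");  // false if already present
 * }</pre>
 *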
1444 * @return the new set
1445 * @since 1.8
1446 */
1447 public static <K> KeySetView<K,Boolean> newKeySet() {
1448 return new KeySetView<K,Boolean>
1449 (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
1450 }
1451
1452 /**
1453 * Creates a new {@link Set} backed by a ConcurrentHashMap
1454 * from the given type to {@code Boolean.TRUE}.
1455 *
1456 * @param initialCapacity The implementation performs internal
1457 * sizing to accommodate this many elements.
1458 * @throws IllegalArgumentException if the initial capacity of
1459 * elements is negative
1460 * @return the new set
1461 * @since 1.8
1462 */
1463 public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
1464 return new KeySetView<K,Boolean>
1465 (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
1466 }
1467
1468 /**
1469 * Returns a {@link Set} view of the keys in this map, using the
1470 * given common mapped value for any additions (i.e., {@link
1471 * Collection#add} and {@link Collection#addAll(Collection)}).
1472 * This is of course only appropriate if it is acceptable to use
1473 * the same value for all additions from this view.
1474 *
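 * <p>For example, a view whose {@code add} inserts {@code Boolean.TRUE}
 * as the value for every added key:
 * <pre> {@code
 * ConcurrentHashMap<String, Boolean> map = new ConcurrentHashMap<String, Boolean>();
 * Set<String> keys = map.keySet(Boolean.TRUE);
 * keys.add("alpha");                            // establishes the mapping "alpha" -> TRUE
 * boolean present = map.containsKey("alpha");   // true
 * }</pre>
 *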
1475 * @param mappedValue the mapped value to use for any additions
1476 * @return the set view
1477 * @throws NullPointerException if the mappedValue is null
1478 */
1479 public KeySetView<K,V> keySet(V mappedValue) {
1480 if (mappedValue == null)
1481 throw new NullPointerException();
1482 return new KeySetView<K,V>(this, mappedValue);
1483 }
1484
1485 /* ---------------- Special Nodes -------------- */
1486
1487 /**
1488 * A node inserted at head of bins during transfer operations.
1489 */
1490 static final class ForwardingNode<K,V> extends Node<K,V> {
1491 final Node<K,V>[] nextTable;
1492 ForwardingNode(Node<K,V>[] tab) {
1493 super(MOVED, null, null, null);
1494 this.nextTable = tab;
1495 }
1496
1497 Node<K,V> find(int h, Object k) {
1498 Node<K,V> e; int n;
1499 Node<K,V>[] tab = nextTable;
1500 if (k != null && tab != null && (n = tab.length) > 0 &&
1501 (e = tabAt(tab, (n - 1) & h)) != null) {
1502 do {
1503 int eh; K ek;
1504 if ((eh = e.hash) == h &&
1505 ((ek = e.key) == k || (ek != null && k.equals(ek))))
1506 return e;
1507 if (eh < 0)
1508 return e.find(h, k);
1509 } while ((e = e.next) != null);
1510 }
1511 return null;
1512 }
1513 }
1514
1515 /**
1516 * A place-holder node used in computeIfAbsent and compute
1517 */
1518 static final class ReservationNode<K,V> extends Node<K,V> {
1519 ReservationNode() {
1520 super(RESERVED, null, null, null);
1521 }
1522
1523 Node<K,V> find(int h, Object k) {
1524 return null;
1525 }
1526 }
1527
1528 /* ---------------- Table Initialization and Resizing -------------- */
1529
1530 /**
1531 * Initializes table, using the size recorded in sizeCtl.
1532 */
1533 private final Node<K,V>[] initTable() {
1534 Node<K,V>[] tab; int sc;
1535 while ((tab = table) == null || tab.length == 0) {
1536 if ((sc = sizeCtl) < 0)
1537 Thread.yield(); // lost initialization race; just spin
1538 else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
1539 try {
1540 if ((tab = table) == null || tab.length == 0) {
1541 int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
1542 @SuppressWarnings({"rawtypes","unchecked"})
1543 Node<K,V>[] nt = (Node<K,V>[])new Node[n];
1544 table = tab = nt;
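// next resize threshold: n - (n >>> 2) == 0.75 * n (the default load factor)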
1545 sc = n - (n >>> 2);
1546 }
1547 } finally {
1548 sizeCtl = sc;
1549 }
1550 break;
1551 }
1552 }
1553 return tab;
1554 }
1555
1556 /**
1557 * Adds to count, and if table is too small and not already
1558 * resizing, initiates transfer. If already resizing, helps
1559 * perform transfer if work is available. Rechecks occupancy
1560 * after a transfer to see if another resize is already needed
1561 * because resizings are lagging additions.
1562 *
1563 * @param x the count to add
1564 * @param check if <0, don't check resize, if <= 1 only check if uncontended
1565 */
1566 private final void addCount(long x, int check) {
1567 CounterCell[] as; long b, s;
1568 if ((as = counterCells) != null ||
1569 !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
1570 CounterHashCode hc; CounterCell a; long v; int m;
1571 boolean uncontended = true;
1572 if ((hc = threadCounterHashCode.get()) == null ||
1573 as == null || (m = as.length - 1) < 0 ||
1574 (a = as[m & hc.code]) == null ||
1575 !(uncontended =
1576 U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
1577 fullAddCount(x, hc, uncontended);
1578 return;
1579 }
1580 if (check <= 1)
1581 return;
1582 s = sumCount();
1583 }
1584 if (check >= 0) {
1585 Node<K,V>[] tab, nt; int sc;
1586 while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
1587 tab.length < MAXIMUM_CAPACITY) {
1588 if (sc < 0) {
1589 if (sc == -1 || transferIndex <= transferOrigin ||
1590 (nt = nextTable) == null)
1591 break;
1592 if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
1593 transfer(tab, nt);
1594 }
1595 else if (U.compareAndSwapInt(this, SIZECTL, sc, -2))
1596 transfer(tab, null);
1597 s = sumCount();
1598 }
1599 }
1600 }
1601
1602 /**
1603 * Helps transfer if a resize is in progress.
1604 */
1605 final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
1606 Node<K,V>[] nextTab; int sc;
1607 if ((f instanceof ForwardingNode) &&
1608 (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
1609 if (nextTab == nextTable && tab == table &&
1610 transferIndex > transferOrigin && (sc = sizeCtl) < -1 &&
1611 U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
1612 transfer(tab, nextTab);
1613 return nextTab;
1614 }
1615 return table;
1616 }
1617
1618 /**
1619 * Tries to presize table to accommodate the given number of elements.
1620 *
1621 * @param size number of elements (doesn't need to be perfectly accurate)
1622 */
1623 private final void tryPresize(int size) {
1624 int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
1625 tableSizeFor(size + (size >>> 1) + 1);
1626 int sc;
1627 while ((sc = sizeCtl) >= 0) {
1628 Node<K,V>[] tab = table; int n;
1629 if (tab == null || (n = tab.length) == 0) {
1630 n = (sc > c) ? sc : c;
1631 if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
1632 try {
1633 if (table == tab) {
1634 @SuppressWarnings({"rawtypes","unchecked"})
1635 Node<K,V>[] nt = (Node<K,V>[])new Node[n];
1636 table = nt;
1637 sc = n - (n >>> 2);
1638 }
1639 } finally {
1640 sizeCtl = sc;
1641 }
1642 }
1643 }
1644 else if (c <= sc || n >= MAXIMUM_CAPACITY)
1645 break;
1646 else if (tab == table &&
1647 U.compareAndSwapInt(this, SIZECTL, sc, -2))
1648 transfer(tab, null);
1649 }
1650 }
1651
1652 /**
1653 * Moves and/or copies the nodes in each bin to new table. See
1654 * above for explanation.
1655 */
1656 private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
1657 int n = tab.length, stride;
1658 if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
1659 stride = MIN_TRANSFER_STRIDE; // subdivide range
1660 if (nextTab == null) { // initiating
1661 try {
1662 @SuppressWarnings({"rawtypes","unchecked"})
1663 Node<K,V>[] nt = (Node<K,V>[])new Node[n << 1];
1664 nextTab = nt;
1665 } catch (Throwable ex) { // try to cope with OOME
1666 sizeCtl = Integer.MAX_VALUE;
1667 return;
1668 }
1669 nextTable = nextTab;
1670 transferOrigin = n;
1671 transferIndex = n;
1672 ForwardingNode<K,V> rev = new ForwardingNode<K,V>(tab);
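// Pre-fill every slot of nextTab with a forwarding node that points
// back at the old table; as each old bin is transferred, its two
// target slots are overwritten, so lookups that reach a
// not-yet-transferred slot of nextTab are redirected to the still
// valid old bins.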
1673 for (int k = n; k > 0;) { // progressively reveal ready slots
1674 int nextk = (k > stride) ? k - stride : 0;
1675 for (int m = nextk; m < k; ++m)
1676 nextTab[m] = rev;
1677 for (int m = n + nextk; m < n + k; ++m)
1678 nextTab[m] = rev;
1679 U.putOrderedInt(this, TRANSFERORIGIN, k = nextk);
1680 }
1681 }
1682 int nextn = nextTab.length;
1683 ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
1684 boolean advance = true;
1685 for (int i = 0, bound = 0;;) {
1686 int nextIndex, nextBound, fh; Node<K,V> f;
1687 while (advance) {
1688 if (--i >= bound)
1689 advance = false;
1690 else if ((nextIndex = transferIndex) <= transferOrigin) {
1691 i = -1;
1692 advance = false;
1693 }
1694 else if (U.compareAndSwapInt
1695 (this, TRANSFERINDEX, nextIndex,
1696 nextBound = (nextIndex > stride ?
1697 nextIndex - stride : 0))) {
1698 bound = nextBound;
1699 i = nextIndex - 1;
1700 advance = false;
1701 }
1702 }
1703 if (i < 0 || i >= n || i + n >= nextn) {
1704 for (int sc;;) {
1705 if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) {
1706 if (sc == -1) {
1707 nextTable = null;
1708 table = nextTab;
1709 sizeCtl = (n << 1) - (n >>> 1);
1710 }
1711 return;
1712 }
1713 }
1714 }
1715 else if ((f = tabAt(tab, i)) == null) {
1716 if (casTabAt(tab, i, null, fwd)) {
1717 setTabAt(nextTab, i, null);
1718 setTabAt(nextTab, i + n, null);
1719 advance = true;
1720 }
1721 }
1722 else if ((fh = f.hash) == MOVED)
1723 advance = true; // already processed
1724 else {
1725 synchronized (f) {
1726 if (tabAt(tab, i) == f) {
1727 Node<K,V> ln, hn;
1728 if (fh >= 0) {
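// Split this list into a "lo" list (stays at index i) and a "hi"
// list (moves to index i + n) according to the (hash & n) bit.
// The trailing run of nodes ending at lastRun all fall on the same
// side, so that run is reused as-is; only nodes before lastRun are
// copied.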
1729 int runBit = fh & n;
1730 Node<K,V> lastRun = f;
1731 for (Node<K,V> p = f.next; p != null; p = p.next) {
1732 int b = p.hash & n;
1733 if (b != runBit) {
1734 runBit = b;
1735 lastRun = p;
1736 }
1737 }
1738 if (runBit == 0) {
1739 ln = lastRun;
1740 hn = null;
1741 }
1742 else {
1743 hn = lastRun;
1744 ln = null;
1745 }
1746 for (Node<K,V> p = f; p != lastRun; p = p.next) {
1747 int ph = p.hash; K pk = p.key; V pv = p.val;
1748 if ((ph & n) == 0)
1749 ln = new Node<K,V>(ph, pk, pv, ln);
1750 else
1751 hn = new Node<K,V>(ph, pk, pv, hn);
1752 }
1753 }
1754 else if (f instanceof TreeBin) {
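// Split the tree bin the same way: rebuild each half as a plain
// list if it shrank to <= UNTREEIFY_THRESHOLD nodes, as a new
// TreeBin otherwise, or reuse the original bin t when the other
// half is empty.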
1755 TreeBin<K,V> t = (TreeBin<K,V>)f;
1756 TreeNode<K,V> lo = null, loTail = null;
1757 TreeNode<K,V> hi = null, hiTail = null;
1758 int lc = 0, hc = 0;
1759 for (Node<K,V> e = t.first; e != null; e = e.next) {
1760 int h = e.hash;
1761 TreeNode<K,V> p = new TreeNode<K,V>
1762 (h, e.key, e.val, null, null);
1763 if ((h & n) == 0) {
1764 if ((p.prev = loTail) == null)
1765 lo = p;
1766 else
1767 loTail.next = p;
1768 loTail = p;
1769 ++lc;
1770 }
1771 else {
1772 if ((p.prev = hiTail) == null)
1773 hi = p;
1774 else
1775 hiTail.next = p;
1776 hiTail = p;
1777 ++hc;
1778 }
1779 }
1780 ln = (lc <= UNTREEIFY_THRESHOLD ? untreeify(lo) :
1781 (hc != 0) ? new TreeBin<K,V>(lo) : t);
1782 hn = (hc <= UNTREEIFY_THRESHOLD ? untreeify(hi) :
1783 (lc != 0) ? new TreeBin<K,V>(hi) : t);
1784 }
1785 else
1786 ln = hn = null;
1787 setTabAt(nextTab, i, ln);
1788 setTabAt(nextTab, i + n, hn);
1789 setTabAt(tab, i, fwd);
1790 advance = true;
1791 }
1792 }
1793 }
1794 }
1795 }
1796
1797 /* ---------------- Conversion from/to TreeBins -------------- */
1798
1799 /**
1800 * Replaces all linked nodes in bin at given index unless table is
1801 * too small, in which case resizes instead.
1802 */
1803 private final void treeifyBin(Node<K,V>[] tab, int index) {
1804 Node<K,V> b; int n, sc;
1805 if (tab != null) {
1806 if ((n = tab.length) < MIN_TREEIFY_CAPACITY) {
1807 if (tab == table && (sc = sizeCtl) >= 0 &&
1808 U.compareAndSwapInt(this, SIZECTL, sc, -2))
1809 transfer(tab, null);
1810 }
1811 else if ((b = tabAt(tab, index)) != null) {
1812 synchronized (b) {
1813 if (tabAt(tab, index) == b) {
1814 TreeNode<K,V> hd = null, tl = null;
1815 for (Node<K,V> e = b; e != null; e = e.next) {
1816 TreeNode<K,V> p =
1817 new TreeNode<K,V>(e.hash, e.key, e.val,
1818 null, null);
1819 if ((p.prev = tl) == null)
1820 hd = p;
1821 else
1822 tl.next = p;
1823 tl = p;
1824 }
1825 setTabAt(tab, index, new TreeBin<K,V>(hd));
1826 }
1827 }
1828 }
1829 }
1830 }
1831
1832 /**
1833 * Returns a list of non-TreeNodes replacing those in the given list.
1834 */
1835 static <K,V> Node<K,V> untreeify(Node<K,V> b) {
1836 Node<K,V> hd = null, tl = null;
1837 for (Node<K,V> q = b; q != null; q = q.next) {
1838 Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
1839 if (tl == null)
1840 hd = p;
1841 else
1842 tl.next = p;
1843 tl = p;
1844 }
1845 return hd;
1846 }
1847
1848 /* ---------------- TreeNodes -------------- */
1849
1850 /**
1851 * Nodes for use in TreeBins
1852 */
1853 static final class TreeNode<K,V> extends Node<K,V> {
1854 TreeNode<K,V> parent; // red-black tree links
1855 TreeNode<K,V> left;
1856 TreeNode<K,V> right;
1857 TreeNode<K,V> prev; // needed to unlink next upon deletion
1858 boolean red;
1859
1860 TreeNode(int hash, K key, V val, Node<K,V> next,
1861 TreeNode<K,V> parent) {
1862 super(hash, key, val, next);
1863 this.parent = parent;
1864 }
1865
1866 Node<K,V> find(int h, Object k) {
1867 return findTreeNode(h, k, null);
1868 }
1869
1870 /**
1871 * Returns the TreeNode (or null if not found) for the given key
1872 * starting at given root.
1873 */
1874 final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
1875 if (k != null) {
1876 TreeNode<K,V> p = this;
1877 do {
1878 int ph, dir; K pk; TreeNode<K,V> q;
1879 TreeNode<K,V> pl = p.left, pr = p.right;
1880 if ((ph = p.hash) > h)
1881 p = pl;
1882 else if (ph < h)
1883 p = pr;
1884 else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
1885 return p;
1886 else if (pl == null && pr == null)
1887 break;
1888 else if ((kc != null ||
1889 (kc = comparableClassFor(k)) != null) &&
1890 (dir = compareComparables(kc, k, pk)) != 0)
1891 p = (dir < 0) ? pl : pr;
1892 else if (pl == null)
1893 p = pr;
1894 else if (pr == null ||
1895 (q = pr.findTreeNode(h, k, kc)) == null)
1896 p = pl;
1897 else
1898 return q;
1899 } while (p != null);
1900 }
1901 return null;
1902 }
1903 }
1904
1905 /* ---------------- TreeBins -------------- */
1906
1907 /**
1908 * TreeNodes used at the heads of bins. TreeBins do not hold user
1909 * keys or values, but instead point to a list of TreeNodes and
1910 * their root. They also maintain a parasitic read-write lock
1911 * forcing writers (who hold bin lock) to wait for readers (who do
1912 * not) to complete before tree restructuring operations.
1913 */
1914 static final class TreeBin<K,V> extends Node<K,V> {
1915 TreeNode<K,V> root;
1916 volatile TreeNode<K,V> first;
1917 volatile Thread waiter;
1918 volatile int lockState;
1919 // values for lockState
1920 static final int WRITER = 1; // set while holding write lock
1921 static final int WAITER = 2; // set when waiting for write lock
1922 static final int READER = 4; // increment value for setting read lock
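// lockState layout: bit 0 = WRITER (tree being restructured),
// bit 1 = WAITER (a writer is parked waiting for readers), and the
// remaining bits hold the reader count in units of READER.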
1923
1924 /**
1925 * Creates bin with initial set of nodes headed by b.
1926 */
1927 TreeBin(TreeNode<K,V> b) {
1928 super(TREEBIN, null, null, null);
1929 this.first = b;
1930 TreeNode<K,V> r = null;
1931 for (TreeNode<K,V> x = b, next; x != null; x = next) {
1932 next = (TreeNode<K,V>)x.next;
1933 x.left = x.right = null;
1934 if (r == null) {
1935 x.parent = null;
1936 x.red = false;
1937 r = x;
1938 }
1939 else {
1940 Object key = x.key;
1941 int hash = x.hash;
1942 Class<?> kc = null;
1943 for (TreeNode<K,V> p = r;;) {
1944 int dir, ph;
1945 if ((ph = p.hash) > hash)
1946 dir = -1;
1947 else if (ph < hash)
1948 dir = 1;
1949 else if ((kc != null ||
1950 (kc = comparableClassFor(key)) != null))
1951 dir = compareComparables(kc, key, p.key);
1952 else
1953 dir = 0;
1954 TreeNode<K,V> xp = p;
1955 if ((p = (dir <= 0) ? p.left : p.right) == null) {
1956 x.parent = xp;
1957 if (dir <= 0)
1958 xp.left = x;
1959 else
1960 xp.right = x;
1961 r = balanceInsertion(r, x);
1962 break;
1963 }
1964 }
1965 }
1966 }
1967 this.root = r;
1968 }
1969
1970 /**
1971 * Acquires write lock for tree restructuring
1972 */
1973 private final void lockRoot() {
1974 if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
1975 contendedLock(); // offload to separate method
1976 }
1977
1978 /**
1979 * Releases write lock for tree restructuring
1980 */
1981 private final void unlockRoot() {
1982 lockState = 0;
1983 }
1984
1985 /**
1986 * Possibly blocks awaiting root lock
1987 */
1988 private final void contendedLock() {
1989 boolean waiting = false;
1990 for (int s;;) {
1991 if (((s = lockState) & ~WAITER) == 0) {
1992 if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
1993 if (waiting)
1994 waiter = null;
1995 return;
1996 }
1997 }
1998 else if ((s & WAITER) == 0) {
1999 if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
2000 waiting = true;
2001 waiter = Thread.currentThread();
2002 }
2003 }
2004 else if (waiting)
2005 LockSupport.park(this);
2006 }
2007 }
2008
2009 /**
2010 * Returns matching node or null if none. Tries to search
2011 * using tree comparisons from root, but continues linear
2012 * search when lock not available.
2013 */
2014 final Node<K,V> find(int h, Object k) {
2015 if (k != null) {
2016 for (Node<K,V> e = first; e != null; e = e.next) {
2017 int s; K ek;
2018 if (((s = lockState) & (WAITER|WRITER)) != 0) {
2019 if (e.hash == h &&
2020 ((ek = e.key) == k || (ek != null && k.equals(ek))))
2021 return e;
2022 }
2023 else if (U.compareAndSwapInt(this, LOCKSTATE, s,
2024 s + READER)) {
2025 TreeNode<K,V> r, p;
2026 try {
2027 p = ((r = root) == null ? null :
2028 r.findTreeNode(h, k, null));
2029 } finally {
2030
2031 Thread w;
2032 int ls;
2033 do {} while (!U.compareAndSwapInt
2034 (this, LOCKSTATE,
2035 ls = lockState, ls - READER));
2036 if (ls == (READER|WAITER) && (w = waiter) != null)
2037 LockSupport.unpark(w);
2038 }
2039 return p;
2040 }
2041 }
2042 }
2043 return null;
2044 }
2045
2046 /**
2047 * Finds or adds a node.
2048 * @return null if added
2049 */
2050 final TreeNode<K,V> putTreeVal(int h, K k, V v) {
2051 Class<?> kc = null;
2052 for (TreeNode<K,V> p = root;;) {
2053 int dir, ph; K pk; TreeNode<K,V> q, pr;
2054 if (p == null) {
2055 first = root = new TreeNode<K,V>(h, k, v, null, null);
2056 break;
2057 }
2058 else if ((ph = p.hash) > h)
2059 dir = -1;
2060 else if (ph < h)
2061 dir = 1;
2062 else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2063 return p;
2064 else if ((kc == null &&
2065 (kc = comparableClassFor(k)) == null) ||
2066 (dir = compareComparables(kc, k, pk)) == 0) {
2067 if (p.left == null)
2068 dir = 1;
2069 else if ((pr = p.right) == null ||
2070 (q = pr.findTreeNode(h, k, kc)) == null)
2071 dir = -1;
2072 else
2073 return q;
2074 }
2075 TreeNode<K,V> xp = p;
2076 if ((p = (dir < 0) ? p.left : p.right) == null) {
2077 TreeNode<K,V> x, f = first;
2078 first = x = new TreeNode<K,V>(h, k, v, f, xp);
2079 if (f != null)
2080 f.prev = x;
2081 if (dir < 0)
2082 xp.left = x;
2083 else
2084 xp.right = x;
2085 if (!xp.red)
2086 x.red = true;
2087 else {
2088 lockRoot();
2089 try {
2090 root = balanceInsertion(root, x);
2091 } finally {
2092 unlockRoot();
2093 }
2094 }
2095 break;
2096 }
2097 }
2098 assert checkInvariants(root);
2099 return null;
2100 }
2101
2102 /**
2103 * Removes the given node, which must be present before this
2104 * call. This is messier than typical red-black deletion code
2105 * because we cannot swap the contents of an interior node
2106 * with a leaf successor that is pinned by "next" pointers
2107 * that are accessible independently of lock. So instead we
2108 * swap the tree linkages.
2109 *
2110 * @return true if now too small so should be untreeified.
2111 */
2112 final boolean removeTreeNode(TreeNode<K,V> p) {
2113 TreeNode<K,V> next = (TreeNode<K,V>)p.next;
2114 TreeNode<K,V> pred = p.prev; // unlink traversal pointers
2115 TreeNode<K,V> r, rl;
2116 if (pred == null)
2117 first = next;
2118 else
2119 pred.next = next;
2120 if (next != null)
2121 next.prev = pred;
2122 if (first == null) {
2123 root = null;
2124 return true;
2125 }
2126 if ((r = root) == null || r.right == null || // too small
2127 (rl = r.left) == null || rl.left == null)
2128 return true;
2129 lockRoot();
2130 try {
2131 TreeNode<K,V> replacement;
2132 TreeNode<K,V> pl = p.left;
2133 TreeNode<K,V> pr = p.right;
2134 if (pl != null && pr != null) {
2135 TreeNode<K,V> s = pr, sl;
2136 while ((sl = s.left) != null) // find successor
2137 s = sl;
2138 boolean c = s.red; s.red = p.red; p.red = c; // swap colors
2139 TreeNode<K,V> sr = s.right;
2140 TreeNode<K,V> pp = p.parent;
2141 if (s == pr) { // p was s's direct parent
2142 p.parent = s;
2143 s.right = p;
2144 }
2145 else {
2146 TreeNode<K,V> sp = s.parent;
2147 if ((p.parent = sp) != null) {
2148 if (s == sp.left)
2149 sp.left = p;
2150 else
2151 sp.right = p;
2152 }
2153 if ((s.right = pr) != null)
2154 pr.parent = s;
2155 }
2156 p.left = null;
2157 if ((p.right = sr) != null)
2158 sr.parent = p;
2159 if ((s.left = pl) != null)
2160 pl.parent = s;
2161 if ((s.parent = pp) == null)
2162 r = s;
2163 else if (p == pp.left)
2164 pp.left = s;
2165 else
2166 pp.right = s;
2167 if (sr != null)
2168 replacement = sr;
2169 else
2170 replacement = p;
2171 }
2172 else if (pl != null)
2173 replacement = pl;
2174 else if (pr != null)
2175 replacement = pr;
2176 else
2177 replacement = p;
2178 if (replacement != p) {
2179 TreeNode<K,V> pp = replacement.parent = p.parent;
2180 if (pp == null)
2181 r = replacement;
2182 else if (p == pp.left)
2183 pp.left = replacement;
2184 else
2185 pp.right = replacement;
2186 p.left = p.right = p.parent = null;
2187 }
2188
2189 root = (p.red) ? r : balanceDeletion(r, replacement);
2190
2191 if (p == replacement) { // detach pointers
2192 TreeNode<K,V> pp;
2193 if ((pp = p.parent) != null) {
2194 if (p == pp.left)
2195 pp.left = null;
2196 else if (p == pp.right)
2197 pp.right = null;
2198 p.parent = null;
2199 }
2200 }
2201 } finally {
2202 unlockRoot();
2203 }
2204 assert checkInvariants(root);
2205 return false;
2206 }
2207
2208 /* ------------------------------------------------------------ */
2209 // Red-black tree methods, all adapted from CLR
2210
2211 static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
2212 TreeNode<K,V> p) {
2213 TreeNode<K,V> r, pp, rl;
2214 if (p != null && (r = p.right) != null) {
2215 if ((rl = p.right = r.left) != null)
2216 rl.parent = p;
2217 if ((pp = r.parent = p.parent) == null)
2218 (root = r).red = false;
2219 else if (pp.left == p)
2220 pp.left = r;
2221 else
2222 pp.right = r;
2223 r.left = p;
2224 p.parent = r;
2225 }
2226 return root;
2227 }
2228
2229 static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
2230 TreeNode<K,V> p) {
2231 TreeNode<K,V> l, pp, lr;
2232 if (p != null && (l = p.left) != null) {
2233 if ((lr = p.left = l.right) != null)
2234 lr.parent = p;
2235 if ((pp = l.parent = p.parent) == null)
2236 (root = l).red = false;
2237 else if (pp.right == p)
2238 pp.right = l;
2239 else
2240 pp.left = l;
2241 l.right = p;
2242 p.parent = l;
2243 }
2244 return root;
2245 }
2246
2247 static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
2248 TreeNode<K,V> x) {
2249 x.red = true;
2250 for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
2251 if ((xp = x.parent) == null) {
2252 x.red = false;
2253 return x;
2254 }
2255 else if (!xp.red || (xpp = xp.parent) == null)
2256 return root;
2257 if (xp == (xppl = xpp.left)) {
2258 if ((xppr = xpp.right) != null && xppr.red) {
2259 xppr.red = false;
2260 xp.red = false;
2261 xpp.red = true;
2262 x = xpp;
2263 }
2264 else {
2265 if (x == xp.right) {
2266 root = rotateLeft(root, x = xp);
2267 xpp = (xp = x.parent) == null ? null : xp.parent;
2268 }
2269 if (xp != null) {
2270 xp.red = false;
2271 if (xpp != null) {
2272 xpp.red = true;
2273 root = rotateRight(root, xpp);
2274 }
2275 }
2276 }
2277 }
2278 else {
2279 if (xppl != null && xppl.red) {
2280 xppl.red = false;
2281 xp.red = false;
2282 xpp.red = true;
2283 x = xpp;
2284 }
2285 else {
2286 if (x == xp.left) {
2287 root = rotateRight(root, x = xp);
2288 xpp = (xp = x.parent) == null ? null : xp.parent;
2289 }
2290 if (xp != null) {
2291 xp.red = false;
2292 if (xpp != null) {
2293 xpp.red = true;
2294 root = rotateLeft(root, xpp);
2295 }
2296 }
2297 }
2298 }
2299 }
2300 }
2301
2302 static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
2303 TreeNode<K,V> x) {
2304 for (TreeNode<K,V> xp, xpl, xpr;;) {
2305 if (x == null || x == root)
2306 return root;
2307 else if ((xp = x.parent) == null) {
2308 x.red = false;
2309 return x;
2310 }
2311 else if (x.red) {
2312 x.red = false;
2313 return root;
2314 }
2315 else if ((xpl = xp.left) == x) {
2316 if ((xpr = xp.right) != null && xpr.red) {
2317 xpr.red = false;
2318 xp.red = true;
2319 root = rotateLeft(root, xp);
2320 xpr = (xp = x.parent) == null ? null : xp.right;
2321 }
2322 if (xpr == null)
2323 x = xp;
2324 else {
2325 TreeNode<K,V> sl = xpr.left, sr = xpr.right;
2326 if ((sr == null || !sr.red) &&
2327 (sl == null || !sl.red)) {
2328 xpr.red = true;
2329 x = xp;
2330 }
2331 else {
2332 if (sr == null || !sr.red) {
2333 if (sl != null)
2334 sl.red = false;
2335 xpr.red = true;
2336 root = rotateRight(root, xpr);
2337 xpr = (xp = x.parent) == null ?
2338 null : xp.right;
2339 }
2340 if (xpr != null) {
2341 xpr.red = (xp == null) ? false : xp.red;
2342 if ((sr = xpr.right) != null)
2343 sr.red = false;
2344 }
2345 if (xp != null) {
2346 xp.red = false;
2347 root = rotateLeft(root, xp);
2348 }
2349 x = root;
2350 }
2351 }
2352 }
2353 else { // symmetric
2354 if (xpl != null && xpl.red) {
2355 xpl.red = false;
2356 xp.red = true;
2357 root = rotateRight(root, xp);
2358 xpl = (xp = x.parent) == null ? null : xp.left;
2359 }
2360 if (xpl == null)
2361 x = xp;
2362 else {
2363 TreeNode<K,V> sl = xpl.left, sr = xpl.right;
2364 if ((sl == null || !sl.red) &&
2365 (sr == null || !sr.red)) {
2366 xpl.red = true;
2367 x = xp;
2368 }
2369 else {
2370 if (sl == null || !sl.red) {
2371 if (sr != null)
2372 sr.red = false;
2373 xpl.red = true;
2374 root = rotateLeft(root, xpl);
2375 xpl = (xp = x.parent) == null ?
2376 null : xp.left;
2377 }
2378 if (xpl != null) {
2379 xpl.red = (xp == null) ? false : xp.red;
2380 if ((sl = xpl.left) != null)
2381 sl.red = false;
2382 }
2383 if (xp != null) {
2384 xp.red = false;
2385 root = rotateRight(root, xp);
2386 }
2387 x = root;
2388 }
2389 }
2390 }
2391 }
2392 }
2393
2394 /**
2395 * Recursive invariant check
2396 */
2397 static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
2398 TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
2399 tb = t.prev, tn = (TreeNode<K,V>)t.next;
2400 if (tb != null && tb.next != t)
2401 return false;
2402 if (tn != null && tn.prev != t)
2403 return false;
2404 if (tp != null && t != tp.left && t != tp.right)
2405 return false;
2406 if (tl != null && (tl.parent != t || tl.hash > t.hash))
2407 return false;
2408 if (tr != null && (tr.parent != t || tr.hash < t.hash))
2409 return false;
2410 if (t.red && tl != null && tl.red && tr != null && tr.red)
2411 return false;
2412 if (tl != null && !checkInvariants(tl))
2413 return false;
2414 if (tr != null && !checkInvariants(tr))
2415 return false;
2416 return true;
2417 }
2418
2419 private static final sun.misc.Unsafe U;
2420 private static final long LOCKSTATE;
2421 static {
2422 try {
2423 U = sun.misc.Unsafe.getUnsafe();
2424 Class<?> k = TreeBin.class;
2425 LOCKSTATE = U.objectFieldOffset
2426 (k.getDeclaredField("lockState"));
2427 } catch (Exception e) {
2428 throw new Error(e);
2429 }
2430 }
2431 }
2432
2433 /* ---------------- Table Traversal -------------- */
2434
2435 /**
2436 * Encapsulates traversal for methods such as containsValue; also
2437 * serves as a base class for other iterators.
2438 *
2439 * Method advance visits once each still-valid node that was
2440 * reachable upon iterator construction. It might miss some that
2441 * were added to a bin after the bin was visited, which is OK wrt
2442 * consistency guarantees. Maintaining this property in the face
2443 * of possible ongoing resizes requires a fair amount of
2444 * bookkeeping state that is difficult to optimize away amidst
2445 * volatile accesses. Even so, traversal maintains reasonable
2446 * throughput.
2447 *
2448 * Normally, iteration proceeds bin-by-bin traversing lists.
2449 * However, if the table has been resized, then all future steps
2450 * must traverse both the bin at the current index as well as at
2451 * (index + baseSize); and so on for further resizings. To
2452 * paranoically cope with potential sharing by users of iterators
2453 * across threads, iteration terminates if a bounds check fails
2454 * for a table read.
2455 */
2456 static class Traverser<K,V> {
2457 Node<K,V>[] tab; // current table; updated if resized
2458 Node<K,V> next; // the next entry to use
2459 int index; // index of bin to use next
2460 int baseIndex; // current index of initial table
2461 int baseLimit; // index bound for initial table
2462 final int baseSize; // initial table size
2463
2464 Traverser(Node<K,V>[] tab, int size, int index, int limit) {
2465 this.tab = tab;
2466 this.baseSize = size;
2467 this.baseIndex = this.index = index;
2468 this.baseLimit = limit;
2469 this.next = null;
2470 }
2471
2472 /**
2473 * Advances if possible, returning next valid node, or null if none.
2474 */
2475 final Node<K,V> advance() {
2476 Node<K,V> e;
2477 if ((e = next) != null)
2478 e = e.next;
2479 for (;;) {
2480 Node<K,V>[] t; int i, n; K ek; // must use locals in checks
2481 if (e != null)
2482 return next = e;
2483 if (baseIndex >= baseLimit || (t = tab) == null ||
2484 (n = t.length) <= (i = index) || i < 0)
2485 return next = null;
2486 if ((e = tabAt(t, index)) != null && e.hash < 0) {
2487 if (e instanceof ForwardingNode) {
2488 tab = ((ForwardingNode<K,V>)e).nextTable;
2489 e = null;
2490 continue;
2491 }
2492 else if (e instanceof TreeBin)
2493 e = ((TreeBin<K,V>)e).first;
2494 else
2495 e = null;
2496 }
2497 if ((index += baseSize) >= n)
2498 index = ++baseIndex; // visit upper slots if present
2499 }
2500 }
2501 }
2502
2503 /**
2504 * Base of key, value, and entry Iterators. Adds fields to
2505 * Traverser to support iterator.remove
2506 */
2507 static class BaseIterator<K,V> extends Traverser<K,V> {
2508 final ConcurrentHashMap<K,V> map;
2509 Node<K,V> lastReturned;
2510 BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
2511 ConcurrentHashMap<K,V> map) {
2512 super(tab, size, index, limit);
2513 this.map = map;
2514 advance();
2515 }
2516
2517 public final boolean hasNext() { return next != null; }
2518 public final boolean hasMoreElements() { return next != null; }
2519
2520 public final void remove() {
2521 Node<K,V> p;
2522 if ((p = lastReturned) == null)
2523 throw new IllegalStateException();
2524 lastReturned = null;
2525 map.replaceNode(p.key, null, null);
2526 }
2527 }
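/*
 * Illustrative usage sketch; the map and predicate below are arbitrary
 * examples. Iterators built on these classes are weakly consistent and
 * remove() writes through to the map (via replaceNode), so entries can
 * be pruned while other threads keep updating.
 *
 *   for (Iterator<Map.Entry<String,Integer>> it = map.entrySet().iterator();
 *        it.hasNext();) {
 *       if (it.next().getValue().intValue() < 0)
 *           it.remove();
 *   }
 */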
2528
2529 static final class KeyIterator<K,V> extends BaseIterator<K,V>
2530 implements Iterator<K>, Enumeration<K> {
2531 KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
2532 ConcurrentHashMap<K,V> map) {
2533 super(tab, index, size, limit, map);
2534 }
2535
2536 public final K next() {
2537 Node<K,V> p;
2538 if ((p = next) == null)
2539 throw new NoSuchElementException();
2540 K k = p.key;
2541 lastReturned = p;
2542 advance();
2543 return k;
2544 }
2545
2546 public final K nextElement() { return next(); }
2547 }
2548
2549 static final class ValueIterator<K,V> extends BaseIterator<K,V>
2550 implements Iterator<V>, Enumeration<V> {
2551 ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
2552 ConcurrentHashMap<K,V> map) {
2553 super(tab, index, size, limit, map);
2554 }
2555
2556 public final V next() {
2557 Node<K,V> p;
2558 if ((p = next) == null)
2559 throw new NoSuchElementException();
2560 V v = p.val;
2561 lastReturned = p;
2562 advance();
2563 return v;
2564 }
2565
2566 public final V nextElement() { return next(); }
2567 }
2568
2569 static final class EntryIterator<K,V> extends BaseIterator<K,V>
2570 implements Iterator<Map.Entry<K,V>> {
2571 EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
2572 ConcurrentHashMap<K,V> map) {
2573 super(tab, index, size, limit, map);
2574 }
2575
2576 public final Map.Entry<K,V> next() {
2577 Node<K,V> p;
2578 if ((p = next) == null)
2579 throw new NoSuchElementException();
2580 K k = p.key;
2581 V v = p.val;
2582 lastReturned = p;
2583 advance();
2584 return new MapEntry<K,V>(k, v, map);
2585 }
2586 }
2587
2588 /**
2589 * Exported Entry for EntryIterator
2590 */
2591 static final class MapEntry<K,V> implements Map.Entry<K,V> {
2592 final K key; // non-null
2593 V val; // non-null
2594 final ConcurrentHashMap<K,V> map;
2595 MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
2596 this.key = key;
2597 this.val = val;
2598 this.map = map;
2599 }
2600 public K getKey() { return key; }
2601 public V getValue() { return val; }
2602 public int hashCode() { return key.hashCode() ^ val.hashCode(); }
2603 public String toString() { return key + "=" + val; }
2604
2605 public boolean equals(Object o) {
2606 Object k, v; Map.Entry<?,?> e;
2607 return ((o instanceof Map.Entry) &&
2608 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
2609 (v = e.getValue()) != null &&
2610 (k == key || k.equals(key)) &&
2611 (v == val || v.equals(val)));
2612 }
2613
2614 /**
2615 * Sets our entry's value and writes through to the map. The
2616 * value to return is somewhat arbitrary here. Since we do not
2617 * necessarily track asynchronous changes, the most recent
2618 * "previous" value could be different from what we return (or
2619 * could even have been removed, in which case the put will
2620 * re-establish it). We do not and cannot guarantee more.
2621 */
2622 public V setValue(V value) {
2623 if (value == null) throw new NullPointerException();
2624 V v = val;
2625 val = value;
2626 map.put(key, value);
2627 return v;
2628 }
2629 }
2630
2631 /* ---------------- Views -------------- */
2632
2633 /**
2634 * Base class for views.
2635 */
2636 abstract static class CollectionView<K,V,E>
2637 implements Collection<E>, java.io.Serializable {
2638 private static final long serialVersionUID = 7249069246763182397L;
2639 final ConcurrentHashMap<K,V> map;
2640 CollectionView(ConcurrentHashMap<K,V> map) { this.map = map; }
2641
2642 /**
2643 * Returns the map backing this view.
2644 *
2645 * @return the map backing this view
2646 */
2647 public ConcurrentHashMap<K,V> getMap() { return map; }
2648
2649 /**
2650 * Removes all of the elements from this view, by removing all
2651 * the mappings from the map backing this view.
2652 */
2653 public final void clear() { map.clear(); }
2654 public final int size() { return map.size(); }
2655 public final boolean isEmpty() { return map.isEmpty(); }
2656
2657 // implementations below rely on concrete classes supplying these
2658 // abstract methods
2659 /**
2660 * Returns a "weakly consistent" iterator that will never
2661 * throw {@link ConcurrentModificationException}, and
2662 * guarantees to traverse elements as they existed upon
2663 * construction of the iterator, and may (but is not
2664 * guaranteed to) reflect any modifications subsequent to
2665 * construction.
2666 */
2667 public abstract Iterator<E> iterator();
2668 public abstract boolean contains(Object o);
2669 public abstract boolean remove(Object o);
2670
2671 private static final String oomeMsg = "Required array size too large";
2672
2673 public final Object[] toArray() {
2674 long sz = map.mappingCount();
2675 if (sz > MAX_ARRAY_SIZE)
2676 throw new OutOfMemoryError(oomeMsg);
2677 int n = (int)sz;
2678 Object[] r = new Object[n];
2679 int i = 0;
2680 for (E e : this) {
2681 if (i == n) {
2682 if (n >= MAX_ARRAY_SIZE)
2683 throw new OutOfMemoryError(oomeMsg);
2684 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
2685 n = MAX_ARRAY_SIZE;
2686 else
2687 n += (n >>> 1) + 1;
2688 r = Arrays.copyOf(r, n);
2689 }
2690 r[i++] = e;
2691 }
2692 return (i == n) ? r : Arrays.copyOf(r, i);
2693 }
2694
2695 @SuppressWarnings("unchecked")
2696 public final <T> T[] toArray(T[] a) {
2697 long sz = map.mappingCount();
2698 if (sz > MAX_ARRAY_SIZE)
2699 throw new OutOfMemoryError(oomeMsg);
2700 int m = (int)sz;
2701 T[] r = (a.length >= m) ? a :
2702 (T[])java.lang.reflect.Array
2703 .newInstance(a.getClass().getComponentType(), m);
2704 int n = r.length;
2705 int i = 0;
2706 for (E e : this) {
2707 if (i == n) {
2708 if (n >= MAX_ARRAY_SIZE)
2709 throw new OutOfMemoryError(oomeMsg);
2710 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
2711 n = MAX_ARRAY_SIZE;
2712 else
2713 n += (n >>> 1) + 1;
2714 r = Arrays.copyOf(r, n);
2715 }
2716 r[i++] = (T)e;
2717 }
2718 if (a == r && i < n) {
2719 r[i] = null; // null-terminate
2720 return r;
2721 }
2722 return (i == n) ? r : Arrays.copyOf(r, i);
2723 }
2724
2725 /**
2726 * Returns a string representation of this collection.
2727 * The string representation consists of the string representations
2728 * of the collection's elements in the order they are returned by
2729 * its iterator, enclosed in square brackets ({@code "[]"}).
2730 * Adjacent elements are separated by the characters {@code ", "}
2731 * (comma and space). Elements are converted to strings as by
2732 * {@link String#valueOf(Object)}.
2733 *
2734 * @return a string representation of this collection
2735 */
2736 public final String toString() {
2737 StringBuilder sb = new StringBuilder();
2738 sb.append('[');
2739 Iterator<E> it = iterator();
2740 if (it.hasNext()) {
2741 for (;;) {
2742 Object e = it.next();
2743 sb.append(e == this ? "(this Collection)" : e);
2744 if (!it.hasNext())
2745 break;
2746 sb.append(',').append(' ');
2747 }
2748 }
2749 return sb.append(']').toString();
2750 }
2751
2752 public final boolean containsAll(Collection<?> c) {
2753 if (c != this) {
2754 for (Object e : c) {
2755 if (e == null || !contains(e))
2756 return false;
2757 }
2758 }
2759 return true;
2760 }
2761
2762 public final boolean removeAll(Collection<?> c) {
2763 boolean modified = false;
2764 for (Iterator<E> it = iterator(); it.hasNext();) {
2765 if (c.contains(it.next())) {
2766 it.remove();
2767 modified = true;
2768 }
2769 }
2770 return modified;
2771 }
2772
2773 public final boolean retainAll(Collection<?> c) {
2774 boolean modified = false;
2775 for (Iterator<E> it = iterator(); it.hasNext();) {
2776 if (!c.contains(it.next())) {
2777 it.remove();
2778 modified = true;
2779 }
2780 }
2781 return modified;
2782 }
2783
2784 }
2785
2786 /**
2787 * A view of a ConcurrentHashMap as a {@link Set} of keys, in
2788 * which additions may optionally be enabled by mapping to a
2789 * common value. This class cannot be directly instantiated.
2790 * See {@link #keySet() keySet()},
2791 * {@link #keySet(Object) keySet(V)},
2792 * {@link #newKeySet() newKeySet()},
2793 * {@link #newKeySet(int) newKeySet(int)}.
2794 *
2795 * @since 1.8
2796 */
2797 public static class KeySetView<K,V> extends CollectionView<K,V,K>
2798 implements Set<K>, java.io.Serializable {
2799 private static final long serialVersionUID = 7249069246763182397L;
2800 private final V value;
2801 KeySetView(ConcurrentHashMap<K,V> map, V value) { // non-public
2802 super(map);
2803 this.value = value;
2804 }
2805
2806 /**
2807 * Returns the default mapped value for additions,
2808 * or {@code null} if additions are not supported.
2809 *
2810 * @return the default mapped value for additions, or {@code null}
2811 * if not supported
2812 */
2813 public V getMappedValue() { return value; }
2814
2815 /**
2816 * {@inheritDoc}
2817 * @throws NullPointerException if the specified key is null
2818 */
2819 public boolean contains(Object o) { return map.containsKey(o); }
2820
2821 /**
2822 * Removes the key from this map view, by removing the key (and its
2823 * corresponding value) from the backing map. This method does
2824 * nothing if the key is not in the map.
2825 *
2826 * @param o the key to be removed from the backing map
2827 * @return {@code true} if the backing map contained the specified key
2828 * @throws NullPointerException if the specified key is null
2829 */
2830 public boolean remove(Object o) { return map.remove(o) != null; }
2831
2832 /**
2833 * @return an iterator over the keys of the backing map
2834 */
2835 public Iterator<K> iterator() {
2836 Node<K,V>[] t;
2837 ConcurrentHashMap<K,V> m = map;
2838 int f = (t = m.table) == null ? 0 : t.length;
2839 return new KeyIterator<K,V>(t, f, 0, f, m);
2840 }
2841
2842 /**
2843 * Adds the specified key to this set view by mapping the key to
2844 * the default mapped value in the backing map, if defined.
2845 *
2846 * @param e key to be added
2847 * @return {@code true} if this set changed as a result of the call
2848 * @throws NullPointerException if the specified key is null
2849 * @throws UnsupportedOperationException if no default mapped value
2850 * for additions was provided
2851 */
2852 public boolean add(K e) {
2853 V v;
2854 if ((v = value) == null)
2855 throw new UnsupportedOperationException();
2856 return map.putVal(e, v, true) == null;
2857 }
2858
2859 /**
2860 * Adds all of the elements in the specified collection to this set,
2861 * as if by calling {@link #add} on each one.
2862 *
2863 * @param c the elements to be inserted into this set
2864 * @return {@code true} if this set changed as a result of the call
2865 * @throws NullPointerException if the collection or any of its
2866 * elements are {@code null}
2867 * @throws UnsupportedOperationException if no default mapped value
2868 * for additions was provided
2869 */
2870 public boolean addAll(Collection<? extends K> c) {
2871 boolean added = false;
2872 V v;
2873 if ((v = value) == null)
2874 throw new UnsupportedOperationException();
2875 for (K e : c) {
2876 if (map.putVal(e, v, true) == null)
2877 added = true;
2878 }
2879 return added;
2880 }
2881
2882 public int hashCode() {
2883 int h = 0;
2884 for (K e : this)
2885 h += e.hashCode();
2886 return h;
2887 }
2888
2889 public boolean equals(Object o) {
2890 Set<?> c;
2891 return ((o instanceof Set) &&
2892 ((c = (Set<?>)o) == this ||
2893 (containsAll(c) && c.containsAll(this))));
2894 }
2895
2896 }
2897
2898 /**
2899 * A view of a ConcurrentHashMap as a {@link Collection} of
2900 * values, in which additions are disabled. This class cannot be
2901 * directly instantiated. See {@link #values()}.
2902 */
2903 static final class ValuesView<K,V> extends CollectionView<K,V,V>
2904 implements Collection<V>, java.io.Serializable {
2905 private static final long serialVersionUID = 2249069246763182397L;
2906 ValuesView(ConcurrentHashMap<K,V> map) { super(map); }
2907 public final boolean contains(Object o) {
2908 return map.containsValue(o);
2909 }
2910
2911 public final boolean remove(Object o) {
2912 if (o != null) {
2913 for (Iterator<V> it = iterator(); it.hasNext();) {
2914 if (o.equals(it.next())) {
2915 it.remove();
2916 return true;
2917 }
2918 }
2919 }
2920 return false;
2921 }
2922
2923 public final Iterator<V> iterator() {
2924 ConcurrentHashMap<K,V> m = map;
2925 Node<K,V>[] t;
2926 int f = (t = m.table) == null ? 0 : t.length;
2927 return new ValueIterator<K,V>(t, f, 0, f, m);
2928 }
2929
2930 public final boolean add(V e) {
2931 throw new UnsupportedOperationException();
2932 }
2933 public final boolean addAll(Collection<? extends V> c) {
2934 throw new UnsupportedOperationException();
2935 }
2936
2937 }
2938
2939 /**
2940 * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
2941 * entries. This class cannot be directly instantiated. See
2942 * {@link #entrySet()}.
2943 */
2944 static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
2945 implements Set<Map.Entry<K,V>>, java.io.Serializable {
2946 private static final long serialVersionUID = 2249069246763182397L;
2947 EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }
2948
2949 public boolean contains(Object o) {
2950 Object k, v, r; Map.Entry<?,?> e;
2951 return ((o instanceof Map.Entry) &&
2952 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
2953 (r = map.get(k)) != null &&
2954 (v = e.getValue()) != null &&
2955 (v == r || v.equals(r)));
2956 }
2957
2958 public boolean remove(Object o) {
2959 Object k, v; Map.Entry<?,?> e;
2960 return ((o instanceof Map.Entry) &&
2961 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
2962 (v = e.getValue()) != null &&
2963 map.remove(k, v));
2964 }
2965
2966 /**
2967 * @return an iterator over the entries of the backing map
2968 */
2969 public Iterator<Map.Entry<K,V>> iterator() {
2970 ConcurrentHashMap<K,V> m = map;
2971 Node<K,V>[] t;
2972 int f = (t = m.table) == null ? 0 : t.length;
2973 return new EntryIterator<K,V>(t, f, 0, f, m);
2974 }
2975
2976 public boolean add(Entry<K,V> e) {
2977 return map.putVal(e.getKey(), e.getValue(), false) == null;
2978 }
2979
2980 public boolean addAll(Collection<? extends Entry<K,V>> c) {
2981 boolean added = false;
2982 for (Entry<K,V> e : c) {
2983 if (add(e))
2984 added = true;
2985 }
2986 return added;
2987 }
2988
2989 public final int hashCode() {
2990 int h = 0;
2991 Node<K,V>[] t;
2992 if ((t = map.table) != null) {
2993 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
2994 for (Node<K,V> p; (p = it.advance()) != null; ) {
2995 h += p.hashCode();
2996 }
2997 }
2998 return h;
2999 }
3000
3001 public final boolean equals(Object o) {
3002 Set<?> c;
3003 return ((o instanceof Set) &&
3004 ((c = (Set<?>)o) == this ||
3005 (containsAll(c) && c.containsAll(this))));
3006 }
3007
3008 }
3009
3010
3011 /* ---------------- Counters -------------- */
3012
3013 // Adapted from LongAdder and Striped64.
3014 // See their internal docs for explanation.
3015
3016 // A padded cell for distributing counts
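// The p0..p6 and q0..q6 fields pad the value onto its own cache line,
// avoiding false sharing between cells updated by different threads.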
3017 static final class CounterCell {
3018 volatile long p0, p1, p2, p3, p4, p5, p6;
3019 volatile long value;
3020 volatile long q0, q1, q2, q3, q4, q5, q6;
3021 CounterCell(long x) { value = x; }
3022 }
3023
3024 /**
3025 * Holder for the thread-local hash code determining which
3026 * CounterCell to use. The code is initialized via the
3027 * counterHashCodeGenerator, but may be moved upon collisions.
3028 */
3029 static final class CounterHashCode {
3030 int code;
3031 }
3032
3033 /**
3034 * Generates initial value for per-thread CounterHashCodes
3035 */
3036 static final AtomicInteger counterHashCodeGenerator = new AtomicInteger();
3037
3038 /**
3039 * Increment for counterHashCodeGenerator. See class ThreadLocal
3040 * for explanation.
3041 */
3042 static final int SEED_INCREMENT = 0x61c88647;
3043
3044 /**
3045 * Per-thread counter hash codes. Shared across all instances.
3046 */
3047 static final ThreadLocal<CounterHashCode> threadCounterHashCode =
3048 new ThreadLocal<CounterHashCode>();
3049
3050
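// Returns baseCount plus the sum of all counter cells. Under concurrent
// updates the result is only an estimate and may even be transiently
// negative, which is why size() and mappingCount() clamp it.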
3051 final long sumCount() {
3052 CounterCell[] as = counterCells; CounterCell a;
3053 long sum = baseCount;
3054 if (as != null) {
3055 for (int i = 0; i < as.length; ++i) {
3056 if ((a = as[i]) != null)
3057 sum += a.value;
3058 }
3059 }
3060 return sum;
3061 }
3062
3063 // See LongAdder version for explanation
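// Strategy: (1) try to CAS the cell indexed by the thread's hash;
// (2) on contention, rehash and retry, creating cells or doubling the
// cell table (up to roughly NCPU cells) under the cellsBusy spinlock;
// (3) fall back to CASing baseCount when the cell table is busy or
// unavailable.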
3064 private final void fullAddCount(long x, CounterHashCode hc,
3065 boolean wasUncontended) {
3066 int h;
3067 if (hc == null) {
3068 hc = new CounterHashCode();
3069 int s = counterHashCodeGenerator.addAndGet(SEED_INCREMENT);
3070 h = hc.code = (s == 0) ? 1 : s; // Avoid zero
3071 threadCounterHashCode.set(hc);
3072 }
3073 else
3074 h = hc.code;
3075 boolean collide = false; // True if last slot nonempty
3076 for (;;) {
3077 CounterCell[] as; CounterCell a; int n; long v;
3078 if ((as = counterCells) != null && (n = as.length) > 0) {
3079 if ((a = as[(n - 1) & h]) == null) {
3080 if (cellsBusy == 0) { // Try to attach new Cell
3081 CounterCell r = new CounterCell(x); // Optimistic create
3082 if (cellsBusy == 0 &&
3083 U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
3084 boolean created = false;
3085 try { // Recheck under lock
3086 CounterCell[] rs; int m, j;
3087 if ((rs = counterCells) != null &&
3088 (m = rs.length) > 0 &&
3089 rs[j = (m - 1) & h] == null) {
3090 rs[j] = r;
3091 created = true;
3092 }
3093 } finally {
3094 cellsBusy = 0;
3095 }
3096 if (created)
3097 break;
3098 continue; // Slot is now non-empty
3099 }
3100 }
3101 collide = false;
3102 }
3103 else if (!wasUncontended) // CAS already known to fail
3104 wasUncontended = true; // Continue after rehash
3105 else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
3106 break;
3107 else if (counterCells != as || n >= NCPU)
3108 collide = false; // At max size or stale
3109 else if (!collide)
3110 collide = true;
3111 else if (cellsBusy == 0 &&
3112 U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
3113 try {
3114 if (counterCells == as) {// Expand table unless stale
3115 CounterCell[] rs = new CounterCell[n << 1];
3116 for (int i = 0; i < n; ++i)
3117 rs[i] = as[i];
3118 counterCells = rs;
3119 }
3120 } finally {
3121 cellsBusy = 0;
3122 }
3123 collide = false;
3124 continue; // Retry with expanded table
3125 }
3126 h ^= h << 13; // Rehash
3127 h ^= h >>> 17;
3128 h ^= h << 5;
3129 }
3130 else if (cellsBusy == 0 && counterCells == as &&
3131 U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
3132 boolean init = false;
3133 try { // Initialize table
3134 if (counterCells == as) {
3135 CounterCell[] rs = new CounterCell[2];
3136 rs[h & 1] = new CounterCell(x);
3137 counterCells = rs;
3138 init = true;
3139 }
3140 } finally {
3141 cellsBusy = 0;
3142 }
3143 if (init)
3144 break;
3145 }
3146 else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
3147 break; // Fall back on using base
3148 }
3149 hc.code = h; // Record index for next time
3150 }
3151
3152 // Unsafe mechanics
3153 private static final sun.misc.Unsafe U;
3154 private static final long SIZECTL;
3155 private static final long TRANSFERINDEX;
3156 private static final long TRANSFERORIGIN;
3157 private static final long BASECOUNT;
3158 private static final long CELLSBUSY;
3159 private static final long CELLVALUE;
3160 private static final long ABASE;
3161 private static final int ASHIFT;
3162
3163 static {
3164 try {
3165 U = sun.misc.Unsafe.getUnsafe();
3166 Class<?> k = ConcurrentHashMap.class;
3167 SIZECTL = U.objectFieldOffset
3168 (k.getDeclaredField("sizeCtl"));
3169 TRANSFERINDEX = U.objectFieldOffset
3170 (k.getDeclaredField("transferIndex"));
3171 TRANSFERORIGIN = U.objectFieldOffset
3172 (k.getDeclaredField("transferOrigin"));
3173 BASECOUNT = U.objectFieldOffset
3174 (k.getDeclaredField("baseCount"));
3175 CELLSBUSY = U.objectFieldOffset
3176 (k.getDeclaredField("cellsBusy"));
3177 Class<?> ck = CounterCell.class;
3178 CELLVALUE = U.objectFieldOffset
3179 (ck.getDeclaredField("value"));
3180 Class<?> sc = Node[].class;
3181 ABASE = U.arrayBaseOffset(sc);
3182 int scale = U.arrayIndexScale(sc);
3183 if ((scale & (scale - 1)) != 0)
3184 throw new Error("data type scale not a power of two");
3185 ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
3186 } catch (Exception e) {
3187 throw new Error(e);
3188 }
3189 }
3190
3191 }