ViewVC Help
View File | Revision Log | Show Annotations | Download File | Root Listing
root/jsr166/jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java
Revision: 1.218
Committed: Sat Jun 1 06:15:45 2013 UTC (11 years ago) by jsr166
Branch: MAIN
Changes since 1.217: +1 -1 lines
Log Message:
typo

File Contents

# Content
1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/publicdomain/zero/1.0/
5 */
6
7 package java.util.concurrent;
8 import java.io.Serializable;
9 import java.io.ObjectStreamField;
10 import java.lang.reflect.ParameterizedType;
11 import java.lang.reflect.Type;
12 import java.util.Arrays;
13 import java.util.Collection;
14 import java.util.Comparator;
15 import java.util.ConcurrentModificationException;
16 import java.util.Enumeration;
17 import java.util.HashMap;
18 import java.util.Hashtable;
19 import java.util.Iterator;
20 import java.util.Map;
21 import java.util.NoSuchElementException;
22 import java.util.Set;
23 import java.util.Spliterator;
24 import java.util.concurrent.ConcurrentMap;
25 import java.util.concurrent.ForkJoinPool;
26 import java.util.concurrent.atomic.AtomicReference;
27 import java.util.concurrent.locks.ReentrantLock;
28 import java.util.concurrent.locks.StampedLock;
29 import java.util.function.BiConsumer;
30 import java.util.function.BiFunction;
31 import java.util.function.BinaryOperator;
32 import java.util.function.Consumer;
33 import java.util.function.DoubleBinaryOperator;
34 import java.util.function.Function;
35 import java.util.function.IntBinaryOperator;
36 import java.util.function.LongBinaryOperator;
37 import java.util.function.ToDoubleBiFunction;
38 import java.util.function.ToDoubleFunction;
39 import java.util.function.ToIntBiFunction;
40 import java.util.function.ToIntFunction;
41 import java.util.function.ToLongBiFunction;
42 import java.util.function.ToLongFunction;
43 import java.util.stream.Stream;
44
45 /**
46 * A hash table supporting full concurrency of retrievals and
47 * high expected concurrency for updates. This class obeys the
48 * same functional specification as {@link java.util.Hashtable}, and
49 * includes versions of methods corresponding to each method of
50 * {@code Hashtable}. However, even though all operations are
51 * thread-safe, retrieval operations do <em>not</em> entail locking,
52 * and there is <em>not</em> any support for locking the entire table
53 * in a way that prevents all access. This class is fully
54 * interoperable with {@code Hashtable} in programs that rely on its
55 * thread safety but not on its synchronization details.
56 *
57 * <p>Retrieval operations (including {@code get}) generally do not
58 * block, so may overlap with update operations (including {@code put}
59 * and {@code remove}). Retrievals reflect the results of the most
60 * recently <em>completed</em> update operations holding upon their
61 * onset. (More formally, an update operation for a given key bears a
62 * <em>happens-before</em> relation with any (non-null) retrieval for
63 * that key reporting the updated value.) For aggregate operations
64 * such as {@code putAll} and {@code clear}, concurrent retrievals may
65 * reflect insertion or removal of only some entries. Similarly,
66 * Iterators and Enumerations return elements reflecting the state of
67 * the hash table at some point at or since the creation of the
68 * iterator/enumeration. They do <em>not</em> throw {@link
69 * ConcurrentModificationException}. However, iterators are designed
70 * to be used by only one thread at a time. Bear in mind that the
71 * results of aggregate status methods including {@code size}, {@code
72 * isEmpty}, and {@code containsValue} are typically useful only when
73 * a map is not undergoing concurrent updates in other threads.
74 * Otherwise the results of these methods reflect transient states
75 * that may be adequate for monitoring or estimation purposes, but not
76 * for program control.
77 *
78 * <p>The table is dynamically expanded when there are too many
79 * collisions (i.e., keys that have distinct hash codes but fall into
80 * the same slot modulo the table size), with the expected average
81 * effect of maintaining roughly two bins per mapping (corresponding
82 * to a 0.75 load factor threshold for resizing). There may be much
83 * variance around this average as mappings are added and removed, but
84 * overall, this maintains a commonly accepted time/space tradeoff for
85 * hash tables. However, resizing this or any other kind of hash
86 * table may be a relatively slow operation. When possible, it is a
87 * good idea to provide a size estimate as an optional {@code
88 * initialCapacity} constructor argument. An additional optional
89 * {@code loadFactor} constructor argument provides a further means of
90 * customizing initial table capacity by specifying the table density
91 * to be used in calculating the amount of space to allocate for the
92 * given number of elements. Also, for compatibility with previous
93 * versions of this class, constructors may optionally specify an
94 * expected {@code concurrencyLevel} as an additional hint for
95 * internal sizing. Note that using many keys with exactly the same
96 * {@code hashCode()} is a sure way to slow down performance of any
97 * hash table. To ameliorate impact, when keys are {@link Comparable},
98 * this class may use comparison order among keys to help break ties.
99 *
100 * <p>A {@link Set} projection of a ConcurrentHashMap may be created
101 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
 * (using {@link #keySet(Object)}) when only keys are of interest, and the
103 * mapped values are (perhaps transiently) not used or all take the
104 * same mapping value.
105 *
 * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
107 * form of histogram or multiset) by using {@link
108 * java.util.concurrent.atomic.LongAdder} values and initializing via
109 * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
110 * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
 * {@code freqs.computeIfAbsent(k, k -> new LongAdder()).increment();}
112 *
113 * <p>This class and its views and iterators implement all of the
114 * <em>optional</em> methods of the {@link Map} and {@link Iterator}
115 * interfaces.
116 *
117 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
118 * does <em>not</em> allow {@code null} to be used as a key or value.
119 *
120 * <p>ConcurrentHashMaps support a set of sequential and parallel bulk
121 * operations that, unlike most {@link Stream} methods, are designed
122 * to be safely, and often sensibly, applied even with maps that are
123 * being concurrently updated by other threads; for example, when
124 * computing a snapshot summary of the values in a shared registry.
125 * There are three kinds of operation, each with four forms, accepting
126 * functions with Keys, Values, Entries, and (Key, Value) arguments
127 * and/or return values. Because the elements of a ConcurrentHashMap
128 * are not ordered in any particular way, and may be processed in
129 * different orders in different parallel executions, the correctness
130 * of supplied functions should not depend on any ordering, or on any
131 * other objects or values that may transiently change while
132 * computation is in progress; and except for forEach actions, should
133 * ideally be side-effect-free. Bulk operations on {@link Map.Entry}
134 * objects do not support method {@code setValue}.
135 *
136 * <ul>
137 * <li> forEach: Perform a given action on each element.
138 * A variant form applies a given transformation on each element
139 * before performing the action.</li>
140 *
141 * <li> search: Return the first available non-null result of
142 * applying a given function on each element; skipping further
143 * search when a result is found.</li>
144 *
145 * <li> reduce: Accumulate each element. The supplied reduction
146 * function cannot rely on ordering (more formally, it should be
147 * both associative and commutative). There are five variants:
148 *
149 * <ul>
150 *
151 * <li> Plain reductions. (There is not a form of this method for
152 * (key, value) function arguments since there is no corresponding
153 * return type.)</li>
154 *
155 * <li> Mapped reductions that accumulate the results of a given
156 * function applied to each element.</li>
157 *
158 * <li> Reductions to scalar doubles, longs, and ints, using a
159 * given basis value.</li>
160 *
161 * </ul>
162 * </li>
163 * </ul>
164 *
165 * <p>These bulk operations accept a {@code parallelismThreshold}
166 * argument. Methods proceed sequentially if the current map size is
167 * estimated to be less than the given threshold. Using a value of
168 * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value
169 * of {@code 1} results in maximal parallelism by partitioning into
170 * enough subtasks to utilize all processors. Normally, you would
171 * initially choose one of these extreme values, and then measure
172 * performance of using in-between values that trade off overhead
173 * versus throughput. Parallel forms use the {@link
174 * ForkJoinPool#commonPool()}.
175 *
176 * <p>The concurrency properties of bulk operations follow
177 * from those of ConcurrentHashMap: Any non-null result returned
178 * from {@code get(key)} and related access methods bears a
179 * happens-before relation with the associated insertion or
180 * update. The result of any bulk operation reflects the
181 * composition of these per-element relations (but is not
182 * necessarily atomic with respect to the map as a whole unless it
183 * is somehow known to be quiescent). Conversely, because keys
184 * and values in the map are never null, null serves as a reliable
185 * atomic indicator of the current lack of any result. To
186 * maintain this property, null serves as an implicit basis for
187 * all non-scalar reduction operations. For the double, long, and
188 * int versions, the basis should be one that, when combined with
189 * any other value, returns that other value (more formally, it
190 * should be the identity element for the reduction). Most common
191 * reductions have these properties; for example, computing a sum
192 * with basis 0 or a minimum with basis MAX_VALUE.
193 *
194 * <p>Search and transformation functions provided as arguments
195 * should similarly return null to indicate the lack of any result
196 * (in which case it is not used). In the case of mapped
197 * reductions, this also enables transformations to serve as
198 * filters, returning null (or, in the case of primitive
199 * specializations, the identity basis) if the element should not
200 * be combined. You can create compound transformations and
201 * filterings by composing them yourself under this "null means
202 * there is nothing there now" rule before using them in search or
203 * reduce operations.
204 *
205 * <p>Methods accepting and/or returning Entry arguments maintain
206 * key-value associations. They may be useful for example when
207 * finding the key for the greatest value. Note that "plain" Entry
208 * arguments can be supplied using {@code new
209 * AbstractMap.SimpleEntry(k,v)}.
210 *
211 * <p>Bulk operations may complete abruptly, throwing an
212 * exception encountered in the application of a supplied
213 * function. Bear in mind when handling such exceptions that other
214 * concurrently executing functions could also have thrown
215 * exceptions, or would have done so if the first exception had
216 * not occurred.
217 *
218 * <p>Speedups for parallel compared to sequential forms are common
219 * but not guaranteed. Parallel operations involving brief functions
220 * on small maps may execute more slowly than sequential forms if the
221 * underlying work to parallelize the computation is more expensive
222 * than the computation itself. Similarly, parallelization may not
223 * lead to much actual parallelism if all processors are busy
224 * performing unrelated tasks.
225 *
226 * <p>All arguments to all task methods must be non-null.
227 *
228 * <p>This class is a member of the
229 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
230 * Java Collections Framework</a>.
231 *
232 * @since 1.5
233 * @author Doug Lea
234 * @param <K> the type of keys maintained by this map
235 * @param <V> the type of mapped values
236 */
237 @SuppressWarnings({"unchecked", "rawtypes", "serial"})
238 public class ConcurrentHashMap<K,V> implements ConcurrentMap<K,V>, Serializable {
239 private static final long serialVersionUID = 7249069246763182397L;
240
241 /*
242 * Overview:
243 *
244 * The primary design goal of this hash table is to maintain
245 * concurrent readability (typically method get(), but also
246 * iterators and related methods) while minimizing update
247 * contention. Secondary goals are to keep space consumption about
248 * the same or better than java.util.HashMap, and to support high
249 * initial insertion rates on an empty table by many threads.
250 *
251 * Each key-value mapping is held in a Node. Because Node key
252 * fields can contain special values, they are defined using plain
253 * Object types (not type "K"). This leads to a lot of explicit
254 * casting (and the use of class-wide warning suppressions). It
255 * also allows some of the public methods to be factored into a
256 * smaller number of internal methods (although sadly not so for
257 * the five variants of put-related operations). The
258 * validation-based approach explained below leads to a lot of
259 * code sprawl because retry-control precludes factoring into
260 * smaller methods.
261 *
262 * The table is lazily initialized to a power-of-two size upon the
263 * first insertion. Each bin in the table normally contains a
264 * list of Nodes (most often, the list has only zero or one Node).
265 * Table accesses require volatile/atomic reads, writes, and
266 * CASes. Because there is no other way to arrange this without
267 * adding further indirections, we use intrinsics
268 * (sun.misc.Unsafe) operations.
269 *
270 * We use the top (sign) bit of Node hash fields for control
271 * purposes -- it is available anyway because of addressing
272 * constraints. Nodes with negative hash fields are forwarding
273 * nodes to either TreeBins or resized tables. The lower 31 bits
274 * of each normal Node's hash field contain a transformation of
275 * the key's hash code.
276 *
277 * Insertion (via put or its variants) of the first node in an
278 * empty bin is performed by just CASing it to the bin. This is
279 * by far the most common case for put operations under most
280 * key/hash distributions. Other update operations (insert,
281 * delete, and replace) require locks. We do not want to waste
282 * the space required to associate a distinct lock object with
283 * each bin, so instead use the first node of a bin list itself as
284 * a lock. Locking support for these locks relies on builtin
285 * "synchronized" monitors.
286 *
287 * Using the first node of a list as a lock does not by itself
288 * suffice though: When a node is locked, any update must first
289 * validate that it is still the first node after locking it, and
290 * retry if not. Because new nodes are always appended to lists,
291 * once a node is first in a bin, it remains first until deleted
292 * or the bin becomes invalidated (upon resizing).
293 *
294 * The main disadvantage of per-bin locks is that other update
295 * operations on other nodes in a bin list protected by the same
296 * lock can stall, for example when user equals() or mapping
297 * functions take a long time. However, statistically, under
298 * random hash codes, this is not a common problem. Ideally, the
299 * frequency of nodes in bins follows a Poisson distribution
300 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
301 * parameter of about 0.5 on average, given the resizing threshold
302 * of 0.75, although with a large variance because of resizing
303 * granularity. Ignoring variance, the expected occurrences of
304 * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
305 * first values are:
306 *
307 * 0: 0.60653066
308 * 1: 0.30326533
309 * 2: 0.07581633
310 * 3: 0.01263606
311 * 4: 0.00157952
312 * 5: 0.00015795
313 * 6: 0.00001316
314 * 7: 0.00000094
315 * 8: 0.00000006
316 * more: less than 1 in ten million
317 *
318 * Lock contention probability for two threads accessing distinct
319 * elements is roughly 1 / (8 * #elements) under random hashes.
320 *
321 * Actual hash code distributions encountered in practice
322 * sometimes deviate significantly from uniform randomness. This
323 * includes the case when N > (1<<30), so some keys MUST collide.
324 * Similarly for dumb or hostile usages in which multiple keys are
325 * designed to have identical hash codes. Also, although we guard
326 * against the worst effects of this (see method spread), sets of
327 * hashes may differ only in bits that do not impact their bin
328 * index for a given power-of-two mask. So we use a secondary
329 * strategy that applies when the number of nodes in a bin exceeds
330 * a threshold, and at least one of the keys implements
331 * Comparable. These TreeBins use a balanced tree to hold nodes
332 * (a specialized form of red-black trees), bounding search time
333 * to O(log N). Each search step in a TreeBin is at least twice as
334 * slow as in a regular list, but given that N cannot exceed
335 * (1<<64) (before running out of addresses) this bounds search
336 * steps, lock hold times, etc, to reasonable constants (roughly
337 * 100 nodes inspected per operation worst case) so long as keys
338 * are Comparable (which is very common -- String, Long, etc).
339 * TreeBin nodes (TreeNodes) also maintain the same "next"
340 * traversal pointers as regular nodes, so can be traversed in
341 * iterators in the same way.
342 *
343 * The table is resized when occupancy exceeds a percentage
344 * threshold (nominally, 0.75, but see below). Any thread
345 * noticing an overfull bin may assist in resizing after the
346 * initiating thread allocates and sets up the replacement
347 * array. However, rather than stalling, these other threads may
348 * proceed with insertions etc. The use of TreeBins shields us
349 * from the worst case effects of overfilling while resizes are in
350 * progress. Resizing proceeds by transferring bins, one by one,
351 * from the table to the next table. To enable concurrency, the
352 * next table must be (incrementally) prefilled with place-holders
353 * serving as reverse forwarders to the old table. Because we are
354 * using power-of-two expansion, the elements from each bin must
355 * either stay at same index, or move with a power of two
356 * offset. We eliminate unnecessary node creation by catching
357 * cases where old nodes can be reused because their next fields
358 * won't change. On average, only about one-sixth of them need
359 * cloning when a table doubles. The nodes they replace will be
360 * garbage collectable as soon as they are no longer referenced by
361 * any reader thread that may be in the midst of concurrently
362 * traversing table. Upon transfer, the old table bin contains
363 * only a special forwarding node (with hash field "MOVED") that
364 * contains the next table as its key. On encountering a
365 * forwarding node, access and update operations restart, using
366 * the new table.
367 *
368 * Each bin transfer requires its bin lock, which can stall
369 * waiting for locks while resizing. However, because other
370 * threads can join in and help resize rather than contend for
371 * locks, average aggregate waits become shorter as resizing
372 * progresses. The transfer operation must also ensure that all
373 * accessible bins in both the old and new table are usable by any
374 * traversal. This is arranged by proceeding from the last bin
375 * (table.length - 1) up towards the first. Upon seeing a
376 * forwarding node, traversals (see class Traverser) arrange to
377 * move to the new table without revisiting nodes. However, to
378 * ensure that no intervening nodes are skipped, bin splitting can
379 * only begin after the associated reverse-forwarders are in
380 * place.
381 *
382 * The traversal scheme also applies to partial traversals of
383 * ranges of bins (via an alternate Traverser constructor)
384 * to support partitioned aggregate operations. Also, read-only
385 * operations give up if ever forwarded to a null table, which
386 * provides support for shutdown-style clearing, which is also not
387 * currently implemented.
388 *
389 * Lazy table initialization minimizes footprint until first use,
390 * and also avoids resizings when the first operation is from a
391 * putAll, constructor with map argument, or deserialization.
392 * These cases attempt to override the initial capacity settings,
393 * but harmlessly fail to take effect in cases of races.
394 *
395 * The element count is maintained using a specialization of
396 * LongAdder. We need to incorporate a specialization rather than
397 * just use a LongAdder in order to access implicit
398 * contention-sensing that leads to creation of multiple
399 * Cells. The counter mechanics avoid contention on
400 * updates but can encounter cache thrashing if read too
401 * frequently during concurrent access. To avoid reading so often,
402 * resizing under contention is attempted only upon adding to a
403 * bin already holding two or more nodes. Under uniform hash
404 * distributions, the probability of this occurring at threshold
405 * is around 13%, meaning that only about 1 in 8 puts check
406 * threshold (and after resizing, many fewer do so). The bulk
407 * putAll operation further reduces contention by only committing
408 * count updates upon these size checks.
409 *
410 * Maintaining API and serialization compatibility with previous
411 * versions of this class introduces several oddities. Mainly: We
 * leave untouched but unused constructor arguments referring to
413 * concurrencyLevel. We accept a loadFactor constructor argument,
414 * but apply it only to initial table capacity (which is the only
415 * time that we can guarantee to honor it.) We also declare an
416 * unused "Segment" class that is instantiated in minimal form
417 * only when serializing.
418 */
419
420 /* ---------------- Constants -------------- */
421
422 /**
423 * The largest possible table capacity. This value must be
424 * exactly 1<<30 to stay within Java array allocation and indexing
425 * bounds for power of two table sizes, and is further required
426 * because the top two bits of 32bit hash fields are used for
427 * control purposes.
428 */
429 private static final int MAXIMUM_CAPACITY = 1 << 30;
430
431 /**
432 * The default initial table capacity. Must be a power of 2
433 * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
434 */
435 private static final int DEFAULT_CAPACITY = 16;
436
437 /**
438 * The largest possible (non-power of two) array size.
439 * Needed by toArray and related methods.
440 */
441 static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
442
443 /**
444 * The default concurrency level for this table. Unused but
445 * defined for compatibility with previous versions of this class.
446 */
447 private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
448
449 /**
450 * The load factor for this table. Overrides of this value in
451 * constructors affect only the initial table capacity. The
452 * actual floating point value isn't normally used -- it is
453 * simpler to use expressions such as {@code n - (n >>> 2)} for
454 * the associated resizing threshold.
455 */
456 private static final float LOAD_FACTOR = 0.75f;
457
458 /**
459 * The bin count threshold for using a tree rather than list for a
460 * bin. The value reflects the approximate break-even point for
461 * using tree-based operations.
462 */
463 private static final int TREE_THRESHOLD = 8;
464
465 /**
466 * Minimum number of rebinnings per transfer step. Ranges are
467 * subdivided to allow multiple resizer threads. This value
468 * serves as a lower bound to avoid resizers encountering
469 * excessive memory contention. The value should be at least
470 * DEFAULT_CAPACITY.
471 */
472 private static final int MIN_TRANSFER_STRIDE = 16;
473
474 /*
475 * Encodings for Node hash fields. See above for explanation.
476 */
477 static final int MOVED = 0x80000000; // hash field for forwarding nodes
478 static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
479
480 /** Number of CPUS, to place bounds on some sizings */
481 static final int NCPU = Runtime.getRuntime().availableProcessors();
482
483 /** For serialization compatibility. */
484 private static final ObjectStreamField[] serialPersistentFields = {
485 new ObjectStreamField("segments", Segment[].class),
486 new ObjectStreamField("segmentMask", Integer.TYPE),
487 new ObjectStreamField("segmentShift", Integer.TYPE)
488 };
489
490 /**
491 * A padded cell for distributing counts. Adapted from LongAdder
492 * and Striped64. See their internal docs for explanation.
493 */
494 @sun.misc.Contended static final class Cell {
495 volatile long value;
496 Cell(long x) { value = x; }
497 }
498
499 /* ---------------- Fields -------------- */
500
501 /**
502 * The array of bins. Lazily initialized upon first insertion.
503 * Size is always a power of two. Accessed directly by iterators.
504 */
505 transient volatile Node<K,V>[] table;
506
507 /**
508 * The next table to use; non-null only while resizing.
509 */
510 private transient volatile Node<K,V>[] nextTable;
511
512 /**
513 * Base counter value, used mainly when there is no contention,
514 * but also as a fallback during table initialization
515 * races. Updated via CAS.
516 */
517 private transient volatile long baseCount;
518
519 /**
520 * Table initialization and resizing control. When negative, the
521 * table is being initialized or resized: -1 for initialization,
522 * else -(1 + the number of active resizing threads). Otherwise,
523 * when table is null, holds the initial table size to use upon
524 * creation, or 0 for default. After initialization, holds the
525 * next element count value upon which to resize the table.
526 */
527 private transient volatile int sizeCtl;
528
529 /**
530 * The next table index (plus one) to split while resizing.
531 */
532 private transient volatile int transferIndex;
533
534 /**
535 * The least available table index to split while resizing.
536 */
537 private transient volatile int transferOrigin;
538
539 /**
540 * Spinlock (locked via CAS) used when resizing and/or creating Cells.
541 */
542 private transient volatile int cellsBusy;
543
544 /**
545 * Table of counter cells. When non-null, size is a power of 2.
546 */
547 private transient volatile Cell[] counterCells;
548
549 // views
550 private transient KeySetView<K,V> keySet;
551 private transient ValuesView<K,V> values;
552 private transient EntrySetView<K,V> entrySet;
553
554 /* ---------------- Table element access -------------- */
555
556 /*
557 * Volatile access methods are used for table elements as well as
558 * elements of in-progress next table while resizing. Uses are
559 * null checked by callers, and implicitly bounds-checked, relying
560 * on the invariants that tab arrays have non-zero size, and all
561 * indices are masked with (tab.length - 1) which is never
562 * negative and always less than length. Note that, to be correct
563 * wrt arbitrary concurrency errors by users, bounds checks must
564 * operate on local variables, which accounts for some odd-looking
565 * inline assignments below.
566 */
567
568 static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
569 return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
570 }
571
572 static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
573 Node<K,V> c, Node<K,V> v) {
574 return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
575 }
576
577 static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
578 U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
579 }
580
581 /* ---------------- Nodes -------------- */
582
583 /**
584 * Key-value entry. This class is never exported out as a
585 * user-mutable Map.Entry (i.e., one supporting setValue; see
586 * MapEntry below), but can be used for read-only traversals used
587 * in bulk tasks. Nodes with a hash field of MOVED are special,
588 * and do not contain user keys or values (and are never
589 * exported). Otherwise, keys and vals are never null.
590 */
591 static class Node<K,V> implements Map.Entry<K,V> {
592 final int hash;
593 final Object key;
594 volatile V val;
595 Node<K,V> next;
596
597 Node(int hash, Object key, V val, Node<K,V> next) {
598 this.hash = hash;
599 this.key = key;
600 this.val = val;
601 this.next = next;
602 }
603
604 public final K getKey() { return (K)key; }
605 public final V getValue() { return val; }
606 public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
607 public final String toString(){ return key + "=" + val; }
608 public final V setValue(V value) {
609 throw new UnsupportedOperationException();
610 }
611
612 public final boolean equals(Object o) {
613 Object k, v, u; Map.Entry<?,?> e;
614 return ((o instanceof Map.Entry) &&
615 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
616 (v = e.getValue()) != null &&
617 (k == key || k.equals(key)) &&
618 (v == (u = val) || v.equals(u)));
619 }
620 }
621
622 /**
623 * Exported Entry for EntryIterator
624 */
625 static final class MapEntry<K,V> implements Map.Entry<K,V> {
626 final K key; // non-null
627 V val; // non-null
628 final ConcurrentHashMap<K,V> map;
629 MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
630 this.key = key;
631 this.val = val;
632 this.map = map;
633 }
634 public K getKey() { return key; }
635 public V getValue() { return val; }
636 public int hashCode() { return key.hashCode() ^ val.hashCode(); }
637 public String toString() { return key + "=" + val; }
638
639 public boolean equals(Object o) {
640 Object k, v; Map.Entry<?,?> e;
641 return ((o instanceof Map.Entry) &&
642 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
643 (v = e.getValue()) != null &&
644 (k == key || k.equals(key)) &&
645 (v == val || v.equals(val)));
646 }
647
648 /**
649 * Sets our entry's value and writes through to the map. The
650 * value to return is somewhat arbitrary here. Since we do not
651 * necessarily track asynchronous changes, the most recent
652 * "previous" value could be different from what we return (or
653 * could even have been removed, in which case the put will
654 * re-establish). We do not and cannot guarantee more.
655 */
656 public V setValue(V value) {
657 if (value == null) throw new NullPointerException();
658 V v = val;
659 val = value;
660 map.put(key, value);
661 return v;
662 }
663 }
664
665
666 /* ---------------- TreeBins -------------- */
667
668 /**
669 * Nodes for use in TreeBins
670 */
671 static final class TreeNode<K,V> extends Node<K,V> {
672 TreeNode<K,V> parent; // red-black tree links
673 TreeNode<K,V> left;
674 TreeNode<K,V> right;
675 TreeNode<K,V> prev; // needed to unlink next upon deletion
676 boolean red;
677
678 TreeNode(int hash, Object key, V val, Node<K,V> next,
679 TreeNode<K,V> parent) {
680 super(hash, key, val, next);
681 this.parent = parent;
682 }
683 }
684
685 /**
686 * Returns a Class for the given object of the form "class C
687 * implements Comparable<C>", if one exists, else null. See below
688 * for explanation.
689 */
690 static Class<?> comparableClassFor(Object x) {
691 Class<?> c, s, cmpc; Type[] ts, as; Type t; ParameterizedType p;
692 if ((c = x.getClass()) == String.class) // bypass checks
693 return c;
694 if ((cmpc = Comparable.class).isAssignableFrom(c)) {
695 while (cmpc.isAssignableFrom(s = c.getSuperclass()))
696 c = s; // find topmost comparable class
697 if ((ts = c.getGenericInterfaces()) != null) {
698 for (int i = 0; i < ts.length; ++i) {
699 if (((t = ts[i]) instanceof ParameterizedType) &&
700 ((p = (ParameterizedType)t).getRawType() == cmpc) &&
701 (as = p.getActualTypeArguments()) != null &&
702 as.length == 1 && as[0] == c) // type arg is c
703 return c;
704 }
705 }
706 }
707 return null;
708 }
709
710 /**
711 * A specialized form of red-black tree for use in bins
712 * whose size exceeds a threshold.
713 *
714 * TreeBins use a special form of comparison for search and
715 * related operations (which is the main reason we cannot use
716 * existing collections such as TreeMaps). TreeBins contain
717 * Comparable elements, but may contain others, as well as
718 * elements that are Comparable but not necessarily Comparable
719 * for the same T, so we cannot invoke compareTo among them. To
720 * handle this, the tree is ordered primarily by hash value, then
721 * by Comparable.compareTo order if applicable. On lookup at a
722 * node, if elements are not comparable or compare as 0 then both
723 * left and right children may need to be searched in the case of
724 * tied hash values. (This corresponds to the full list search
725 * that would be necessary if all elements were non-Comparable and
726 * had tied hashes.) The red-black balancing code is updated from
727 * pre-jdk-collections
728 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
729 * based in turn on Cormen, Leiserson, and Rivest "Introduction to
730 * Algorithms" (CLR).
731 *
732 * TreeBins also maintain a separate locking discipline than
733 * regular bins. Because they are forwarded via special MOVED
734 * nodes at bin heads (which can never change once established),
735 * we cannot use those nodes as locks. Instead, TreeBin extends
736 * StampedLock to support a form of read-write lock. For update
737 * operations and table validation, the exclusive form of lock
738 * behaves in the same way as bin-head locks. However, lookups use
739 * shared read-lock mechanics to allow multiple readers in the
740 * absence of writers. Additionally, these lookups do not ever
741 * block: While the lock is not available, they proceed along the
742 * slow traversal path (via next-pointers) until the lock becomes
743 * available or the list is exhausted, whichever comes
744 * first. These cases are not fast, but maximize aggregate
745 * expected throughput.
746 */
    static final class TreeBin<K,V> extends StampedLock {
        private static final long serialVersionUID = 2249069246763182397L;
        // The inherited StampedLock guards the tree structure; the
        // "first"/next list is maintained so readers can fall back to a
        // linear scan when the read lock is unavailable.
        transient TreeNode<K,V> root;  // root of tree
        transient TreeNode<K,V> first; // head of next-pointer list

        /** From CLR: left-rotate the subtree rooted at p. */
        private void rotateLeft(TreeNode<K,V> p) {
            if (p != null) {
                TreeNode<K,V> r = p.right, pp, rl;
                if ((rl = p.right = r.left) != null)
                    rl.parent = p;
                if ((pp = r.parent = p.parent) == null)
                    root = r;          // p was root; r replaces it
                else if (pp.left == p)
                    pp.left = r;
                else
                    pp.right = r;
                r.left = p;
                p.parent = r;
            }
        }

        /** From CLR: right-rotate the subtree rooted at p. */
        private void rotateRight(TreeNode<K,V> p) {
            if (p != null) {
                TreeNode<K,V> l = p.left, pp, lr;
                if ((lr = p.left = l.right) != null)
                    lr.parent = p;
                if ((pp = l.parent = p.parent) == null)
                    root = l;          // p was root; l replaces it
                else if (pp.right == p)
                    pp.right = l;
                else
                    pp.left = l;
                l.right = p;
                p.parent = l;
            }
        }

        /**
         * Returns the TreeNode (or null if not found) for the given key
         * starting at given root.
         *
         * @param h  spread hash of k
         * @param k  the key (non-null)
         * @param p  subtree root to start from
         * @param cc k's comparable class, or null if k is not usefully
         *           Comparable (see comparableClassFor)
         */
        final TreeNode<K,V> getTreeNode(int h, Object k, TreeNode<K,V> p,
                                        Class<?> cc) {
            while (p != null) {
                int dir, ph; Object pk;
                if ((ph = p.hash) != h)
                    dir = (h < ph) ? -1 : 1;    // primary order: hash value
                else if ((pk = p.key) == k || k.equals(pk))
                    return p;
                else if (cc == null || comparableClassFor(pk) != cc ||
                         (dir = ((Comparable<Object>)k).compareTo(pk)) == 0) {
                    // Tied hash with no usable comparison: must consider
                    // both subtrees -- recurse right, then continue left.
                    TreeNode<K,V> r, pr; // check both sides
                    if ((pr = p.right) != null &&
                        (r = getTreeNode(h, k, pr, cc)) != null)
                        return r;
                    else // continue left
                        dir = -1;
                }
                p = (dir > 0) ? p.right : p.left;
            }
            return null;
        }

        /**
         * Wrapper for getTreeNode used by CHM.get. Tries to obtain
         * read-lock to call getTreeNode, but during failure to get
         * lock, searches along next links.
         */
        final V getValue(int h, Object k) {
            Class<?> cc = comparableClassFor(k);
            Node<K,V> r = null;
            for (Node<K,V> e = first; e != null; e = e.next) {
                long s;
                if ((s = tryReadLock()) != 0L) {
                    // Got the read lock: do the tree search.
                    try {
                        r = getTreeNode(h, k, root, cc);
                    } finally {
                        unlockRead(s);
                    }
                    break;
                }
                // Lock unavailable: check this list node directly and
                // retry the lock on the next iteration.
                else if (e.hash == h && k.equals(e.key)) {
                    r = e;
                    break;
                }
            }
            return r == null ? null : r.val;
        }

        /**
         * Finds or adds a node.
         * @return null if added
         */
        final TreeNode<K,V> putTreeNode(int h, Object k, V v) {
            Class<?> cc = comparableClassFor(k);
            TreeNode<K,V> pp = root, p = null;
            int dir = 0;
            while (pp != null) { // find existing node or leaf to insert at
                int ph; Object pk;
                p = pp;
                if ((ph = p.hash) != h)
                    dir = (h < ph) ? -1 : 1;
                else if ((pk = p.key) == k || k.equals(pk))
                    return p;       // existing node: caller decides what to do
                else if (cc == null || comparableClassFor(pk) != cc ||
                         (dir = ((Comparable<Object>)k).compareTo(pk)) == 0) {
                    // Tied: full right-subtree search, as in getTreeNode
                    TreeNode<K,V> r, pr;
                    if ((pr = p.right) != null &&
                        (r = getTreeNode(h, k, pr, cc)) != null)
                        return r;
                    else // continue left
                        dir = -1;
                }
                pp = (dir > 0) ? p.right : p.left;
            }

            // Not found: link new node at list head and as child of p
            TreeNode<K,V> f = first;
            TreeNode<K,V> x = first = new TreeNode<K,V>(h, k, v, f, p);
            if (p == null)
                root = x;
            else { // attach and rebalance; adapted from CLR
                if (f != null)
                    f.prev = x;
                if (dir <= 0)
                    p.left = x;
                else
                    p.right = x;
                x.red = true;
                for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
                    if ((xp = x.parent) == null) {
                        (root = x).red = false;   // x is root: recolor black
                        break;
                    }
                    else if (!xp.red || (xpp = xp.parent) == null) {
                        // Parent black (or is root): done; ensure root black
                        TreeNode<K,V> r = root;
                        if (r != null && r.red)
                            r.red = false;
                        break;
                    }
                    else if ((xppl = xpp.left) == xp) {
                        if ((xppr = xpp.right) != null && xppr.red) {
                            // Red uncle: recolor and continue from grandparent
                            xppr.red = false;
                            xp.red = false;
                            xpp.red = true;
                            x = xpp;
                        }
                        else {
                            if (x == xp.right) {
                                rotateLeft(x = xp);
                                xpp = (xp = x.parent) == null ? null : xp.parent;
                            }
                            if (xp != null) {
                                xp.red = false;
                                if (xpp != null) {
                                    xpp.red = true;
                                    rotateRight(xpp);
                                }
                            }
                        }
                    }
                    else { // mirror image of the branch above
                        if (xppl != null && xppl.red) {
                            xppl.red = false;
                            xp.red = false;
                            xpp.red = true;
                            x = xpp;
                        }
                        else {
                            if (x == xp.left) {
                                rotateRight(x = xp);
                                xpp = (xp = x.parent) == null ? null : xp.parent;
                            }
                            if (xp != null) {
                                xp.red = false;
                                if (xpp != null) {
                                    xpp.red = true;
                                    rotateLeft(xpp);
                                }
                            }
                        }
                    }
                }
            }
            assert checkInvariants();
            return null;
        }

        /**
         * Removes the given node, that must be present before this
         * call. This is messier than typical red-black deletion code
         * because we cannot swap the contents of an interior node
         * with a leaf successor that is pinned by "next" pointers
         * that are accessible independently of lock. So instead we
         * swap the tree linkages.
         */
        final void deleteTreeNode(TreeNode<K,V> p) {
            TreeNode<K,V> next = (TreeNode<K,V>)p.next;
            TreeNode<K,V> pred = p.prev;  // unlink traversal pointers
            if (pred == null)
                first = next;
            else
                pred.next = next;
            if (next != null)
                next.prev = pred;
            else if (pred == null) {      // p was the only node
                root = null;
                return;
            }
            TreeNode<K,V> replacement;
            TreeNode<K,V> pl = p.left;
            TreeNode<K,V> pr = p.right;
            if (pl != null && pr != null) {
                // Two children: swap tree linkages (not contents) with
                // in-order successor s, then remove from s's old slot.
                TreeNode<K,V> s = pr, sl;
                while ((sl = s.left) != null) // find successor
                    s = sl;
                boolean c = s.red; s.red = p.red; p.red = c; // swap colors
                TreeNode<K,V> sr = s.right;
                TreeNode<K,V> pp = p.parent;
                if (s == pr) { // p was s's direct parent
                    p.parent = s;
                    s.right = p;
                }
                else {
                    TreeNode<K,V> sp = s.parent;
                    if ((p.parent = sp) != null) {
                        if (s == sp.left)
                            sp.left = p;
                        else
                            sp.right = p;
                    }
                    if ((s.right = pr) != null)
                        pr.parent = s;
                }
                p.left = null;
                if ((p.right = sr) != null)
                    sr.parent = p;
                if ((s.left = pl) != null)
                    pl.parent = s;
                if ((s.parent = pp) == null)
                    root = s;
                else if (p == pp.left)
                    pp.left = s;
                else
                    pp.right = s;
                if (sr != null)
                    replacement = sr;
                else
                    replacement = p;
            }
            else if (pl != null)
                replacement = pl;
            else if (pr != null)
                replacement = pr;
            else
                replacement = p;
            if (replacement != p) {
                // Splice replacement into p's former tree position
                TreeNode<K,V> pp = replacement.parent = p.parent;
                if (pp == null)
                    root = replacement;
                else if (p == pp.left)
                    pp.left = replacement;
                else
                    pp.right = replacement;
                p.left = p.right = p.parent = null;
            }
            if (!p.red) { // rebalance, from CLR
                for (TreeNode<K,V> x = replacement; x != null; ) {
                    TreeNode<K,V> xp, xpl, xpr;
                    if (x.red || (xp = x.parent) == null) {
                        x.red = false;
                        break;
                    }
                    else if ((xpl = xp.left) == x) {
                        if ((xpr = xp.right) != null && xpr.red) {
                            xpr.red = false;
                            xp.red = true;
                            rotateLeft(xp);
                            xpr = (xp = x.parent) == null ? null : xp.right;
                        }
                        if (xpr == null)
                            x = xp;
                        else {
                            TreeNode<K,V> sl = xpr.left, sr = xpr.right;
                            if ((sr == null || !sr.red) &&
                                (sl == null || !sl.red)) {
                                xpr.red = true;
                                x = xp;
                            }
                            else {
                                if (sr == null || !sr.red) {
                                    if (sl != null)
                                        sl.red = false;
                                    xpr.red = true;
                                    rotateRight(xpr);
                                    xpr = (xp = x.parent) == null ?
                                        null : xp.right;
                                }
                                if (xpr != null) {
                                    xpr.red = (xp == null) ? false : xp.red;
                                    if ((sr = xpr.right) != null)
                                        sr.red = false;
                                }
                                if (xp != null) {
                                    xp.red = false;
                                    rotateLeft(xp);
                                }
                                x = root;
                            }
                        }
                    }
                    else { // symmetric
                        if (xpl != null && xpl.red) {
                            xpl.red = false;
                            xp.red = true;
                            rotateRight(xp);
                            xpl = (xp = x.parent) == null ? null : xp.left;
                        }
                        if (xpl == null)
                            x = xp;
                        else {
                            TreeNode<K,V> sl = xpl.left, sr = xpl.right;
                            if ((sl == null || !sl.red) &&
                                (sr == null || !sr.red)) {
                                xpl.red = true;
                                x = xp;
                            }
                            else {
                                if (sl == null || !sl.red) {
                                    if (sr != null)
                                        sr.red = false;
                                    xpl.red = true;
                                    rotateLeft(xpl);
                                    xpl = (xp = x.parent) == null ?
                                        null : xp.left;
                                }
                                if (xpl != null) {
                                    xpl.red = (xp == null) ? false : xp.red;
                                    if ((sl = xpl.left) != null)
                                        sl.red = false;
                                }
                                if (xp != null) {
                                    xp.red = false;
                                    rotateRight(xp);
                                }
                                x = root;
                            }
                        }
                    }
                }
            }
            if (p == replacement) {  // detach pointers
                TreeNode<K,V> pp;
                if ((pp = p.parent) != null) {
                    if (p == pp.left)
                        pp.left = null;
                    else if (p == pp.right)
                        pp.right = null;
                    p.parent = null;
                }
            }
            assert checkInvariants();
        }

        /**
         * Checks linkage and balance invariants at root.
         */
        final boolean checkInvariants() {
            TreeNode<K,V> r = root;
            if (r == null)
                return (first == null);
            else
                return (first != null) && checkTreeNode(r);
        }

        /**
         * Recursive invariant check: list links, parent/child links,
         * hash ordering, and no red node with two red children.
         */
        final boolean checkTreeNode(TreeNode<K,V> t) {
            TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
                tb = t.prev, tn = (TreeNode<K,V>)t.next;
            if (tb != null && tb.next != t)
                return false;
            if (tn != null && tn.prev != t)
                return false;
            if (tp != null && t != tp.left && t != tp.right)
                return false;
            if (tl != null && (tl.parent != t || tl.hash > t.hash))
                return false;
            if (tr != null && (tr.parent != t || tr.hash < t.hash))
                return false;
            if (t.red && tl != null && tl.red && tr != null && tr.red)
                return false;
            if (tl != null && !checkTreeNode(tl))
                return false;
            if (tr != null && !checkTreeNode(tr))
                return false;
            return true;
        }
    }
1148
1149 /* ---------------- Collision reduction methods -------------- */
1150
1151 /**
1152 * Spreads higher bits to lower, and also forces top bit to 0.
1153 * Because the table uses power-of-two masking, sets of hashes
1154 * that vary only in bits above the current mask will always
1155 * collide. (Among known examples are sets of Float keys holding
1156 * consecutive whole numbers in small tables.) To counter this,
1157 * we apply a transform that spreads the impact of higher bits
1158 * downward. There is a tradeoff between speed, utility, and
1159 * quality of bit-spreading. Because many common sets of hashes
1160 * are already reasonably distributed across bits (so don't benefit
1161 * from spreading), and because we use trees to handle large sets
1162 * of collisions in bins, we don't need excessively high quality.
1163 */
1164 private static final int spread(int h) {
1165 h ^= (h >>> 18) ^ (h >>> 12);
1166 return (h ^ (h >>> 10)) & HASH_BITS;
1167 }
1168
    /**
     * Replaces a list bin with a tree bin if key is comparable. Call
     * only when locked.
     *
     * @param tab   the table (no-op if null)
     * @param index the bin index to convert
     * @param key   the key triggering conversion; only converts when
     *              the key is usefully Comparable (comparableClassFor)
     */
    private final void replaceWithTreeBin(Node<K,V>[] tab, int index, Object key) {
        if (tab != null && comparableClassFor(key) != null) {
            TreeBin<K,V> t = new TreeBin<K,V>();
            for (Node<K,V> e = tabAt(tab, index); e != null; e = e.next)
                t.putTreeNode(e.hash, e.key, e.val);
            // Install a MOVED node carrying the TreeBin as its key
            setTabAt(tab, index, new Node<K,V>(MOVED, t, null, null));
        }
    }
1181
1182 /* ---------------- Internal access and update methods -------------- */
1183
    /**
     * Implementation for get and containsKey. Takes no locks;
     * negative-hash nodes are either TreeBins (delegated to the bin's
     * own lookup) or forwarding nodes to a resized table.
     *
     * @return the mapped value, or null if none
     */
    private final V internalGet(Object k) {
        int h = spread(k.hashCode());
        V v = null;
        Node<K,V>[] tab; Node<K,V> e;
        if ((tab = table) != null &&
            (e = tabAt(tab, (tab.length - 1) & h)) != null) {
            for (;;) {
                int eh; Object ek;
                if ((eh = e.hash) < 0) {        // special node
                    if ((ek = e.key) instanceof TreeBin) { // search TreeBin
                        v = ((TreeBin<K,V>)ek).getValue(h, k);
                        break;
                    }
                    else if (!(ek instanceof Node[]) || // try new table
                             (e = tabAt(tab = (Node<K,V>[])ek,
                                        (tab.length - 1) & h)) == null)
                        break;
                }
                else if (eh == h && ((ek = e.key) == k || k.equals(ek))) {
                    v = e.val;
                    break;
                }
                else if ((e = e.next) == null)  // end of bin list
                    break;
            }
        }
        return v;
    }
1213
    /**
     * Implementation for the four public remove/replace methods:
     * Replaces node value with v, conditional upon match of cv if
     * non-null. If resulting value is null, delete.
     *
     * @param k  the key
     * @param v  the replacement value, or null to delete the mapping
     * @param cv the expected current value, or null for unconditional
     * @return the old value, or null if no (matching) mapping existed
     */
    private final V internalReplace(Object k, V v, Object cv) {
        int h = spread(k.hashCode());
        V oldVal = null;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int i, fh; Object fk;
            if (tab == null ||
                (f = tabAt(tab, i = (tab.length - 1) & h)) == null)
                break;                           // empty bin: nothing to do
            else if ((fh = f.hash) < 0) {
                if ((fk = f.key) instanceof TreeBin) {
                    // Tree bin: search and update under the bin's write lock
                    TreeBin<K,V> t = (TreeBin<K,V>)fk;
                    long stamp = t.writeLock();
                    boolean validated = false;
                    boolean deleted = false;
                    try {
                        if (tabAt(tab, i) == f) {    // recheck under lock
                            validated = true;
                            Class<?> cc = comparableClassFor(k);
                            TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc);
                            if (p != null) {
                                V pv = p.val;
                                if (cv == null || cv == pv || cv.equals(pv)) {
                                    oldVal = pv;
                                    if (v != null)
                                        p.val = v;
                                    else {
                                        deleted = true;
                                        t.deleteTreeNode(p);
                                    }
                                }
                            }
                        }
                    } finally {
                        t.unlockWrite(stamp);
                    }
                    if (validated) {
                        if (deleted)
                            addCount(-1L, -1);
                        break;
                    }
                }
                else
                    tab = (Node<K,V>[])fk;   // forwarding node: retry in new table
            }
            else {
                // Plain list bin: lock the head node, then scan
                boolean validated = false;
                boolean deleted = false;
                synchronized (f) {
                    if (tabAt(tab, i) == f) {        // recheck under lock
                        validated = true;
                        for (Node<K,V> e = f, pred = null;;) {
                            Object ek;
                            if (e.hash == h &&
                                ((ek = e.key) == k || k.equals(ek))) {
                                V ev = e.val;
                                if (cv == null || cv == ev || cv.equals(ev)) {
                                    oldVal = ev;
                                    if (v != null)
                                        e.val = v;
                                    else {
                                        deleted = true;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                }
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null)
                                break;
                        }
                    }
                }
                if (validated) {
                    if (deleted)
                        addCount(-1L, -1);
                    break;
                }
            }
        }
        return oldVal;
    }
1304
1305 /*
1306 * Internal versions of insertion methods
1307 * All have the same basic structure as the first (internalPut):
1308 * 1. If table uninitialized, create
1309 * 2. If bin empty, try to CAS new node
1310 * 3. If bin stale, use new table
1311 * 4. if bin converted to TreeBin, validate and relay to TreeBin methods
1312 * 5. Lock and validate; if valid, scan and add or update
1313 *
1314 * The putAll method differs mainly in attempting to pre-allocate
1315 * enough table space, and also more lazily performs count updates
1316 * and checks.
1317 *
1318 * Most of the function-accepting methods can't be factored nicely
1319 * because they require different functional forms, so instead
1320 * sprawl out similar mechanics.
1321 */
1322
    /**
     * Implementation for put and putIfAbsent.
     *
     * @param k key (non-null)
     * @param v value (non-null)
     * @param onlyIfAbsent if true, do not overwrite an existing value
     * @return the previous value, or null if the key was absent
     * @throws NullPointerException if k or v is null
     */
    private final V internalPut(K k, V v, boolean onlyIfAbsent) {
        if (k == null || v == null) throw new NullPointerException();
        int h = spread(k.hashCode());
        int len = 0;                 // bin length estimate for addCount check
        for (Node<K,V>[] tab = table;;) {
            int i, fh; Node<K,V> f; Object fk;
            if (tab == null)
                tab = initTable();   // lazy table creation
            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
                if (casTabAt(tab, i, null, new Node<K,V>(h, k, v, null)))
                    break;                   // no lock when adding to empty bin
            }
            else if ((fh = f.hash) < 0) {
                if ((fk = f.key) instanceof TreeBin) {
                    // Tree bin: insert under the bin's write lock
                    TreeBin<K,V> t = (TreeBin<K,V>)fk;
                    long stamp = t.writeLock();
                    V oldVal = null;
                    try {
                        if (tabAt(tab, i) == f) {    // recheck under lock
                            len = 2;
                            TreeNode<K,V> p = t.putTreeNode(h, k, v);
                            if (p != null) {         // key already present
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = v;
                            }
                        }
                    } finally {
                        t.unlockWrite(stamp);
                    }
                    if (len != 0) {
                        if (oldVal != null)
                            return oldVal;
                        break;
                    }
                }
                else
                    tab = (Node<K,V>[])fk;   // forwarding node: use new table
            }
            else {
                // Plain list bin: lock head, scan, append if absent
                V oldVal = null;
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        len = 1;
                        for (Node<K,V> e = f;; ++len) {
                            Object ek;
                            if (e.hash == h &&
                                ((ek = e.key) == k || k.equals(ek))) {
                                oldVal = e.val;
                                if (!onlyIfAbsent)
                                    e.val = v;
                                break;
                            }
                            Node<K,V> last = e;
                            if ((e = e.next) == null) {
                                last.next = new Node<K,V>(h, k, v, null);
                                if (len > TREE_THRESHOLD)   // bin grew too long
                                    replaceWithTreeBin(tab, i, k);
                                break;
                            }
                        }
                    }
                }
                if (len != 0) {
                    if (oldVal != null)
                        return oldVal;
                    break;
                }
            }
        }
        addCount(1L, len);
        return null;
    }
1397
    /**
     * Implementation for computeIfAbsent.
     *
     * @param k  key (non-null)
     * @param mf mapping function (non-null); a null result leaves the
     *           map unchanged
     * @return the existing or newly computed value, or null if the
     *         function returned null
     */
    private final V internalComputeIfAbsent(K k, Function<? super K, ? extends V> mf) {
        if (k == null || mf == null)
            throw new NullPointerException();
        int h = spread(k.hashCode());
        V val = null;
        int len = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int i; Object fk;
            if (tab == null)
                tab = initTable();
            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
                // Reserve the empty bin with a placeholder node and
                // compute while holding its lock.
                Node<K,V> node = new Node<K,V>(h, k, null, null);
                synchronized (node) {
                    if (casTabAt(tab, i, null, node)) {
                        len = 1;
                        try {
                            if ((val = mf.apply(k)) != null)
                                node.val = val;
                        } finally {
                            if (val == null)            // null/failed compute:
                                setTabAt(tab, i, null); // undo the reservation
                        }
                    }
                }
                if (len != 0)
                    break;
            }
            else if (f.hash < 0) {
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin<K,V> t = (TreeBin<K,V>)fk;
                    long stamp = t.writeLock();
                    boolean added = false;
                    try {
                        if (tabAt(tab, i) == f) {       // recheck under lock
                            len = 2;
                            Class<?> cc = comparableClassFor(k);
                            TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc);
                            if (p != null)
                                val = p.val;            // already present
                            else if ((val = mf.apply(k)) != null) {
                                added = true;
                                t.putTreeNode(h, k, val);
                            }
                        }
                    } finally {
                        t.unlockWrite(stamp);
                    }
                    if (len != 0) {
                        if (!added)
                            return val;
                        break;
                    }
                }
                else
                    tab = (Node<K,V>[])fk;   // forwarding node: use new table
            }
            else {
                boolean added = false;
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        len = 1;
                        for (Node<K,V> e = f;; ++len) {
                            Object ek; V ev;
                            if (e.hash == h &&
                                ((ek = e.key) == k || k.equals(ek))) {
                                val = e.val;            // already present
                                break;
                            }
                            Node<K,V> last = e;
                            if ((e = e.next) == null) {
                                if ((val = mf.apply(k)) != null) {
                                    added = true;
                                    last.next = new Node<K,V>(h, k, val, null);
                                    if (len > TREE_THRESHOLD)
                                        replaceWithTreeBin(tab, i, k);
                                }
                                break;
                            }
                        }
                    }
                }
                if (len != 0) {
                    if (!added)
                        return val;
                    break;
                }
            }
        }
        if (val != null)
            addCount(1L, len);
        return val;
    }
1491
    /**
     * Implementation for compute and computeIfPresent.
     *
     * @param k key (non-null)
     * @param onlyIfPresent if true, never create a new mapping
     * @param mf remapping function (non-null); a null result removes
     *           any existing mapping
     * @return the value now associated with the key, or null
     */
    private final V internalCompute(K k, boolean onlyIfPresent,
                                    BiFunction<? super K, ? super V, ? extends V> mf) {
        if (k == null || mf == null)
            throw new NullPointerException();
        int h = spread(k.hashCode());
        V val = null;
        int delta = 0;               // +1 if a mapping was added, -1 if removed
        int len = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int i, fh; Object fk;
            if (tab == null)
                tab = initTable();
            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
                if (onlyIfPresent)   // computeIfPresent on empty bin: no-op
                    break;
                // Reserve the empty bin with a placeholder; compute
                // under its lock.
                Node<K,V> node = new Node<K,V>(h, k, null, null);
                synchronized (node) {
                    if (casTabAt(tab, i, null, node)) {
                        try {
                            len = 1;
                            if ((val = mf.apply(k, null)) != null) {
                                node.val = val;
                                delta = 1;
                            }
                        } finally {
                            if (delta == 0)             // null result: undo
                                setTabAt(tab, i, null);
                        }
                    }
                }
                if (len != 0)
                    break;
            }
            else if ((fh = f.hash) < 0) {
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin<K,V> t = (TreeBin<K,V>)fk;
                    long stamp = t.writeLock();
                    try {
                        if (tabAt(tab, i) == f) {       // recheck under lock
                            len = 2;
                            Class<?> cc = comparableClassFor(k);
                            TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc);
                            if (p != null || !onlyIfPresent) {
                                V pv = (p == null) ? null : p.val;
                                if ((val = mf.apply(k, pv)) != null) {
                                    if (p != null)
                                        p.val = val;
                                    else {
                                        delta = 1;
                                        t.putTreeNode(h, k, val);
                                    }
                                }
                                else if (p != null) {
                                    delta = -1;
                                    t.deleteTreeNode(p);
                                }
                            }
                        }
                    } finally {
                        t.unlockWrite(stamp);
                    }
                    if (len != 0)
                        break;
                }
                else
                    tab = (Node<K,V>[])fk;   // forwarding node: use new table
            }
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        len = 1;
                        for (Node<K,V> e = f, pred = null;; ++len) {
                            Object ek;
                            if (e.hash == h &&
                                ((ek = e.key) == k || k.equals(ek))) {
                                val = mf.apply(k, e.val);
                                if (val != null)
                                    e.val = val;
                                else {                  // remap to null: unlink
                                    delta = -1;
                                    Node<K,V> en = e.next;
                                    if (pred != null)
                                        pred.next = en;
                                    else
                                        setTabAt(tab, i, en);
                                }
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null) {
                                if (!onlyIfPresent &&
                                    (val = mf.apply(k, null)) != null) {
                                    pred.next = new Node<K,V>(h, k, val, null);
                                    delta = 1;
                                    if (len > TREE_THRESHOLD)
                                        replaceWithTreeBin(tab, i, k);
                                }
                                break;
                            }
                        }
                    }
                }
                if (len != 0)
                    break;
            }
        }
        if (delta != 0)
            addCount((long)delta, len);
        return val;
    }
1603
    /**
     * Implementation for merge.
     *
     * @param k  key (non-null)
     * @param v  value to install if the key is absent (non-null)
     * @param mf function combining the existing and given values; a
     *           null result removes the mapping
     * @return the value now associated with the key, or null if removed
     */
    private final V internalMerge(K k, V v,
                                  BiFunction<? super V, ? super V, ? extends V> mf) {
        if (k == null || v == null || mf == null)
            throw new NullPointerException();
        int h = spread(k.hashCode());
        V val = null;
        int delta = 0;               // +1 if added, -1 if removed
        int len = 0;
        for (Node<K,V>[] tab = table;;) {
            int i; Node<K,V> f; Object fk;
            if (tab == null)
                tab = initTable();
            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
                // Empty bin: absent key takes v directly, mf not called
                if (casTabAt(tab, i, null, new Node<K,V>(h, k, v, null))) {
                    delta = 1;
                    val = v;
                    break;
                }
            }
            else if (f.hash < 0) {
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin<K,V> t = (TreeBin<K,V>)fk;
                    long stamp = t.writeLock();
                    try {
                        if (tabAt(tab, i) == f) {       // recheck under lock
                            len = 2;
                            Class<?> cc = comparableClassFor(k);
                            TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc);
                            val = (p == null) ? v : mf.apply(p.val, v);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeNode(h, k, val);
                                }
                            }
                            else if (p != null) {       // merged to null: remove
                                delta = -1;
                                t.deleteTreeNode(p);
                            }
                        }
                    } finally {
                        t.unlockWrite(stamp);
                    }
                    if (len != 0)
                        break;
                }
                else
                    tab = (Node<K,V>[])fk;   // forwarding node: use new table
            }
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        len = 1;
                        for (Node<K,V> e = f, pred = null;; ++len) {
                            Object ek;
                            if (e.hash == h &&
                                ((ek = e.key) == k || k.equals(ek))) {
                                val = mf.apply(e.val, v);
                                if (val != null)
                                    e.val = val;
                                else {                  // merged to null: unlink
                                    delta = -1;
                                    Node<K,V> en = e.next;
                                    if (pred != null)
                                        pred.next = en;
                                    else
                                        setTabAt(tab, i, en);
                                }
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null) { // absent: append v
                                delta = 1;
                                val = v;
                                pred.next = new Node<K,V>(h, k, val, null);
                                if (len > TREE_THRESHOLD)
                                    replaceWithTreeBin(tab, i, k);
                                break;
                            }
                        }
                    }
                }
                if (len != 0)
                    break;
            }
        }
        if (delta != 0)
            addCount((long)delta, len);
        return val;
    }
1697
    /**
     * Implementation for putAll. Pre-sizes the table, then inserts
     * each entry, lazily batching count updates.
     *
     * @param m the source map; a null entry/key/value aborts the copy
     *          and throws NullPointerException after counts are flushed
     */
    private final void internalPutAll(Map<? extends K, ? extends V> m) {
        tryPresize(m.size());
        long delta = 0L; // number of uncommitted additions
        boolean npe = false; // to throw exception on exit for nulls
        try { // to clean up counts on other exceptions
            for (Map.Entry<?, ? extends V> entry : m.entrySet()) {
                Object k; V v;
                if (entry == null || (k = entry.getKey()) == null ||
                    (v = entry.getValue()) == null) {
                    npe = true;
                    break;
                }
                int h = spread(k.hashCode());
                for (Node<K,V>[] tab = table;;) {
                    int i; Node<K,V> f; int fh; Object fk;
                    if (tab == null)
                        tab = initTable();
                    else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
                        if (casTabAt(tab, i, null, new Node<K,V>(h, k, v, null))) {
                            ++delta;
                            break;
                        }
                    }
                    else if ((fh = f.hash) < 0) {
                        if ((fk = f.key) instanceof TreeBin) {
                            TreeBin<K,V> t = (TreeBin<K,V>)fk;
                            long stamp = t.writeLock();
                            boolean validated = false;
                            try {
                                if (tabAt(tab, i) == f) {   // recheck under lock
                                    validated = true;
                                    Class<?> cc = comparableClassFor(k);
                                    TreeNode<K,V> p = t.getTreeNode(h, k,
                                                                    t.root, cc);
                                    if (p != null)
                                        p.val = v;          // overwrite existing
                                    else {
                                        ++delta;
                                        t.putTreeNode(h, k, v);
                                    }
                                }
                            } finally {
                                t.unlockWrite(stamp);
                            }
                            if (validated)
                                break;
                        }
                        else
                            tab = (Node<K,V>[])fk;  // forwarding node
                    }
                    else {
                        int len = 0;
                        synchronized (f) {
                            if (tabAt(tab, i) == f) {
                                len = 1;
                                for (Node<K,V> e = f;; ++len) {
                                    Object ek;
                                    if (e.hash == h &&
                                        ((ek = e.key) == k || k.equals(ek))) {
                                        e.val = v;          // overwrite existing
                                        break;
                                    }
                                    Node<K,V> last = e;
                                    if ((e = e.next) == null) {
                                        ++delta;
                                        last.next = new Node<K,V>(h, k, v, null);
                                        if (len > TREE_THRESHOLD)
                                            replaceWithTreeBin(tab, i, k);
                                        break;
                                    }
                                }
                            }
                        }
                        if (len != 0) {
                            if (len > 1) {  // long bin: flush pending count now
                                addCount(delta, len);
                                delta = 0L;
                            }
                            break;
                        }
                    }
                }
            }
        } finally {
            if (delta != 0L)    // flush whatever additions remain uncounted
                addCount(delta, 2);
        }
        if (npe)
            throw new NullPointerException();
    }
1789
    /**
     * Implementation for clear. Steps through each bin, removing all
     * nodes; the count is adjusted once at the end.
     */
    private final void internalClear() {
        long delta = 0L; // negative number of deletions
        int i = 0;
        Node<K,V>[] tab = table;
        while (tab != null && i < tab.length) {
            Node<K,V> f = tabAt(tab, i);
            if (f == null)
                ++i;                            // empty bin
            else if (f.hash < 0) {
                Object fk;
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin<K,V> t = (TreeBin<K,V>)fk;
                    long stamp = t.writeLock();
                    try {
                        if (tabAt(tab, i) == f) {   // recheck under lock
                            for (Node<K,V> p = t.first; p != null; p = p.next)
                                --delta;
                            t.first = null;
                            t.root = null;
                            ++i;
                        }
                    } finally {
                        t.unlockWrite(stamp);
                    }
                }
                else
                    tab = (Node<K,V>[])fk;  // forwarding node: switch tables
            }
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {       // recheck under lock
                        for (Node<K,V> e = f; e != null; e = e.next)
                            --delta;
                        setTabAt(tab, i, null);
                        ++i;
                    }
                }
            }
        }
        if (delta != 0L)
            addCount(delta, -1);
    }
1836
1837 /* ---------------- Table Initialization and Resizing -------------- */
1838
1839 /**
1840 * Returns a power of two table size for the given desired capacity.
1841 * See Hackers Delight, sec 3.2
1842 */
1843 private static final int tableSizeFor(int c) {
1844 int n = c - 1;
1845 n |= n >>> 1;
1846 n |= n >>> 2;
1847 n |= n >>> 4;
1848 n |= n >>> 8;
1849 n |= n >>> 16;
1850 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
1851 }
1852
    /**
     * Initializes table, using the size recorded in sizeCtl. Exactly
     * one thread wins the sizeCtl CAS and creates the array; losers
     * spin until the table is published.
     *
     * @return the (possibly newly created) table
     */
    private final Node<K,V>[] initTable() {
        Node<K,V>[] tab; int sc;
        while ((tab = table) == null) {
            if ((sc = sizeCtl) < 0)
                Thread.yield(); // lost initialization race; just spin
            else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                try {
                    if ((tab = table) == null) {    // recheck after winning CAS
                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                        table = tab = (Node<K,V>[])new Node[n];
                        sc = n - (n >>> 2);         // resize threshold: 0.75 * n
                    }
                } finally {
                    sizeCtl = sc;                   // publish threshold, release
                }
                break;
            }
        }
        return tab;
    }
1876
    /**
     * Adds to count, and if table is too small and not already
     * resizing, initiates transfer. If already resizing, helps
     * perform transfer if work is available. Rechecks occupancy
     * after a transfer to see if another resize is already needed
     * because resizings are lagging additions.
     *
     * @param x the count to add
     * @param check if <0, don't check resize, if <= 1 only check if uncontended
     */
    private final void addCount(long x, int check) {
        Cell[] as; long b, s;
        if ((as = counterCells) != null ||
            !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
            // Base-count CAS failed or cells already exist: use the
            // striped counter cells instead.
            Cell a; long v; int m;
            boolean uncontended = true;
            if (as == null || (m = as.length - 1) < 0 ||
                (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
                !(uncontended =
                  U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
                fullAddCount(x, uncontended);   // slow path: init/expand cells
                return;
            }
            if (check <= 1)
                return;
            s = sumCount();
        }
        if (check >= 0) {
            // Possibly start a resize, or help one already in progress
            Node<K,V>[] tab, nt; int sc;
            while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
                   tab.length < MAXIMUM_CAPACITY) {
                if (sc < 0) {       // resize in progress
                    if (sc == -1 || transferIndex <= transferOrigin ||
                        (nt = nextTable) == null)
                        break;      // initializing, or no work left to claim
                    if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
                        transfer(tab, nt);
                }
                else if (U.compareAndSwapInt(this, SIZECTL, sc, -2))
                    transfer(tab, null);    // we start the transfer
                s = sumCount();
            }
        }
    }
1921
    /**
     * Tries to presize table to accommodate the given number of elements.
     *
     * @param size number of elements (doesn't need to be perfectly accurate)
     */
    private final void tryPresize(int size) {
        // Target capacity: 1.5 * size rounded up to a power of two, capped.
        int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
            tableSizeFor(size + (size >>> 1) + 1);
        int sc;
        while ((sc = sizeCtl) >= 0) { // give up if initializing/resizing
            Node<K,V>[] tab = table; int n;
            if (tab == null || (n = tab.length) == 0) {
                // Table not yet created: initialize it directly, same
                // protocol as initTable (sizeCtl = -1 while creating).
                n = (sc > c) ? sc : c;
                if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                    try {
                        if (table == tab) {
                            table = (Node<K,V>[])new Node[n];
                            sc = n - (n >>> 2); // threshold: 0.75 * n
                        }
                    } finally {
                        sizeCtl = sc;
                    }
                }
            }
            else if (c <= sc || n >= MAXIMUM_CAPACITY)
                break; // already large enough, or cannot grow further
            else if (tab == table &&
                     U.compareAndSwapInt(this, SIZECTL, sc, -2))
                transfer(tab, null); // start a resize
        }
    }
1953
    /**
     * Moves and/or copies the nodes in each bin to new table. See
     * above for explanation.
     */
    private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
        int n = tab.length, stride;
        // Subdivide the index range into strides so multiple threads can
        // help; never smaller than MIN_TRANSFER_STRIDE to bound contention.
        if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
            stride = MIN_TRANSFER_STRIDE; // subdivide range
        if (nextTab == null) {            // initiating
            try {
                nextTab = (Node<K,V>[])new Node[n << 1]; // double capacity
            } catch (Throwable ex) {      // try to cope with OOME
                sizeCtl = Integer.MAX_VALUE; // give up on resizing
                return;
            }
            nextTable = nextTab;
            transferOrigin = n;
            transferIndex = n;
            // Pre-fill new-table slots with a reverse-forwarding node so
            // accesses landing there are relayed until the slot is ready.
            Node<K,V> rev = new Node<K,V>(MOVED, tab, null, null);
            for (int k = n; k > 0;) {    // progressively reveal ready slots
                int nextk = (k > stride) ? k - stride : 0;
                for (int m = nextk; m < k; ++m)
                    nextTab[m] = rev;
                for (int m = n + nextk; m < n + k; ++m)
                    nextTab[m] = rev;
                // Ordered write so readers see slots filled before origin moves.
                U.putOrderedInt(this, TRANSFERORIGIN, k = nextk);
            }
        }
        int nextn = nextTab.length;
        Node<K,V> fwd = new Node<K,V>(MOVED, nextTab, null, null);
        boolean advance = true;
        for (int i = 0, bound = 0;;) {
            int nextIndex, nextBound; Node<K,V> f; Object fk;
            // Claim the next bin to process, or CAS-claim a fresh stride
            // (working from high indices downward).
            while (advance) {
                if (--i >= bound)
                    advance = false;
                else if ((nextIndex = transferIndex) <= transferOrigin) {
                    i = -1; // no more strides available
                    advance = false;
                }
                else if (U.compareAndSwapInt
                         (this, TRANSFERINDEX, nextIndex,
                          nextBound = (nextIndex > stride ?
                                       nextIndex - stride : 0))) {
                    bound = nextBound;
                    i = nextIndex - 1;
                    advance = false;
                }
            }
            if (i < 0 || i >= n || i + n >= nextn) {
                // This thread's share is done: increment sizeCtl (undoing the
                // decrement made when joining). The last resizer (sc == -1)
                // installs the new table and sets the new threshold.
                for (int sc;;) {
                    if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) {
                        if (sc == -1) {
                            nextTable = null;
                            table = nextTab;
                            sizeCtl = (n << 1) - (n >>> 1); // 0.75 * (2n)
                        }
                        return;
                    }
                }
            }
            else if ((f = tabAt(tab, i)) == null) {
                // Empty bin: install forwarding node; late writers will
                // then operate on the new table.
                if (casTabAt(tab, i, null, fwd)) {
                    setTabAt(nextTab, i, null);
                    setTabAt(nextTab, i + n, null);
                    advance = true;
                }
            }
            else if (f.hash >= 0) { // ordinary list bin
                synchronized (f) {
                    if (tabAt(tab, i) == f) { // recheck head under lock
                        // Split into lo (bit n clear) and hi (bit n set)
                        // lists. The trailing run of nodes whose bit agrees
                        // is reused rather than copied.
                        int runBit = f.hash & n;
                        Node<K,V> lastRun = f, lo = null, hi = null;
                        for (Node<K,V> p = f.next; p != null; p = p.next) {
                            int b = p.hash & n;
                            if (b != runBit) {
                                runBit = b;
                                lastRun = p;
                            }
                        }
                        if (runBit == 0)
                            lo = lastRun;
                        else
                            hi = lastRun;
                        // Clone nodes preceding lastRun into the proper list.
                        for (Node<K,V> p = f; p != lastRun; p = p.next) {
                            int ph = p.hash; Object pk = p.key; V pv = p.val;
                            if ((ph & n) == 0)
                                lo = new Node<K,V>(ph, pk, pv, lo);
                            else
                                hi = new Node<K,V>(ph, pk, pv, hi);
                        }
                        setTabAt(nextTab, i, lo);
                        setTabAt(nextTab, i + n, hi);
                        setTabAt(tab, i, fwd); // mark old bin as moved
                        advance = true;
                    }
                }
            }
            else if ((fk = f.key) instanceof TreeBin) { // tree bin
                TreeBin<K,V> t = (TreeBin<K,V>)fk;
                long stamp = t.writeLock();
                try {
                    if (tabAt(tab, i) == f) { // recheck under lock
                        TreeNode<K,V> root;
                        Node<K,V> ln = null, hn = null;
                        if ((root = t.root) != null) {
                            Node<K,V> e, p; TreeNode<K,V> lr, rr; int lh;
                            TreeBin<K,V> lt = null, ht = null;
                            // If min and max hashes are equal, every node has
                            // the same split bit, so the whole tree moves.
                            for (lr = root; lr.left != null; lr = lr.left);
                            for (rr = root; rr.right != null; rr = rr.right);
                            if ((lh = lr.hash) == rr.hash) { // move entire tree
                                if ((lh & n) == 0)
                                    lt = t;
                                else
                                    ht = t;
                            }
                            else {
                                // Split: rebuild two trees from the bin's list.
                                lt = new TreeBin<K,V>();
                                ht = new TreeBin<K,V>();
                                int lc = 0, hc = 0;
                                for (e = t.first; e != null; e = e.next) {
                                    int h = e.hash;
                                    Object k = e.key; V v = e.val;
                                    if ((h & n) == 0) {
                                        ++lc;
                                        lt.putTreeNode(h, k, v);
                                    }
                                    else {
                                        ++hc;
                                        ht.putTreeNode(h, k, v);
                                    }
                                }
                                // Untreeify halves too small to stay trees.
                                if (lc < TREE_THRESHOLD) { // throw away
                                    for (p = lt.first; p != null; p = p.next)
                                        ln = new Node<K,V>(p.hash, p.key,
                                                           p.val, ln);
                                    lt = null;
                                }
                                if (hc < TREE_THRESHOLD) {
                                    for (p = ht.first; p != null; p = p.next)
                                        hn = new Node<K,V>(p.hash, p.key,
                                                           p.val, hn);
                                    ht = null;
                                }
                            }
                            // Wrap surviving TreeBins in MOVED-hash holders.
                            if (ln == null && lt != null)
                                ln = new Node<K,V>(MOVED, lt, null, null);
                            if (hn == null && ht != null)
                                hn = new Node<K,V>(MOVED, ht, null, null);
                        }
                        setTabAt(nextTab, i, ln);
                        setTabAt(nextTab, i + n, hn);
                        setTabAt(tab, i, fwd);
                        advance = true;
                    }
                } finally {
                    t.unlockWrite(stamp);
                }
            }
            else
                advance = true; // already processed
        }
    }
2117
2118 /* ---------------- Counter support -------------- */
2119
2120 final long sumCount() {
2121 Cell[] as = counterCells; Cell a;
2122 long sum = baseCount;
2123 if (as != null) {
2124 for (int i = 0; i < as.length; ++i) {
2125 if ((a = as[i]) != null)
2126 sum += a.value;
2127 }
2128 }
2129 return sum;
2130 }
2131
    // See LongAdder version for explanation.
    // Slow path for addCount: retries under contention, creating,
    // expanding, or initializing the counter-cell table as needed,
    // falling back to baseCount if the cell table cannot be used.
    private final void fullAddCount(long x, boolean wasUncontended) {
        int h;
        if ((h = ThreadLocalRandom.getProbe()) == 0) {
            ThreadLocalRandom.localInit();      // force initialization
            h = ThreadLocalRandom.getProbe();
            wasUncontended = true; // probe was invalid, so prior CAS failure doesn't count
        }
        boolean collide = false;                // True if last slot nonempty
        for (;;) {
            Cell[] as; Cell a; int n; long v;
            if ((as = counterCells) != null && (n = as.length) > 0) {
                if ((a = as[(n - 1) & h]) == null) {
                    if (cellsBusy == 0) {            // Try to attach new Cell
                        Cell r = new Cell(x);        // Optimistic create
                        if (cellsBusy == 0 &&
                            U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                            boolean created = false;
                            try {               // Recheck under lock
                                Cell[] rs; int m, j;
                                if ((rs = counterCells) != null &&
                                    (m = rs.length) > 0 &&
                                    rs[j = (m - 1) & h] == null) {
                                    rs[j] = r;
                                    created = true;
                                }
                            } finally {
                                cellsBusy = 0; // release spinlock
                            }
                            if (created)
                                break;
                            continue;           // Slot is now non-empty
                        }
                    }
                    collide = false;
                }
                else if (!wasUncontended)       // CAS already known to fail
                    wasUncontended = true;      // Continue after rehash
                else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
                    break;
                else if (counterCells != as || n >= NCPU)
                    collide = false;            // At max size or stale
                else if (!collide)
                    collide = true; // first collision: rehash once before expanding
                else if (cellsBusy == 0 &&
                         U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                    try {
                        if (counterCells == as) {// Expand table unless stale
                            Cell[] rs = new Cell[n << 1];
                            for (int i = 0; i < n; ++i)
                                rs[i] = as[i];
                            counterCells = rs;
                        }
                    } finally {
                        cellsBusy = 0;
                    }
                    collide = false;
                    continue;                   // Retry with expanded table
                }
                h = ThreadLocalRandom.advanceProbe(h); // rehash to a new slot
            }
            else if (cellsBusy == 0 && counterCells == as &&
                     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                boolean init = false;
                try {                           // Initialize table
                    if (counterCells == as) {
                        Cell[] rs = new Cell[2]; // start small; doubles on collision
                        rs[h & 1] = new Cell(x);
                        counterCells = rs;
                        init = true;
                    }
                } finally {
                    cellsBusy = 0;
                }
                if (init)
                    break;
            }
            else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
                break;                          // Fall back on using base
        }
    }
2213
2214 /* ----------------Table Traversal -------------- */
2215
    /**
     * Encapsulates traversal for methods such as containsValue; also
     * serves as a base class for other iterators and spliterators.
     *
     * Method advance visits once each still-valid node that was
     * reachable upon iterator construction. It might miss some that
     * were added to a bin after the bin was visited, which is OK wrt
     * consistency guarantees. Maintaining this property in the face
     * of possible ongoing resizes requires a fair amount of
     * bookkeeping state that is difficult to optimize away amidst
     * volatile accesses. Even so, traversal maintains reasonable
     * throughput.
     *
     * Normally, iteration proceeds bin-by-bin traversing lists.
     * However, if the table has been resized, then all future steps
     * must traverse both the bin at the current index as well as at
     * (index + baseSize); and so on for further resizings. To
     * paranoically cope with potential sharing by users of iterators
     * across threads, iteration terminates if a bounds checks fails
     * for a table read.
     */
    static class Traverser<K,V> {
        Node<K,V>[] tab;        // current table; updated if resized
        Node<K,V> next;         // the next entry to use
        int index;              // index of bin to use next
        int baseIndex;          // current index of initial table
        int baseLimit;          // index bound for initial table
        final int baseSize;     // initial table size

        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
            this.tab = tab;
            this.baseSize = size;
            this.baseIndex = this.index = index;
            this.baseLimit = limit;
            this.next = null;
        }

        /**
         * Advances if possible, returning next valid node, or null if none.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next; // continue along the current bin's list
            for (;;) {
                Node<K,V>[] t; int i, n; Object ek; // must use locals in checks
                if (e != null)
                    return next = e;
                // Done, table missing, or index out of range (defends against
                // cross-thread sharing of this traverser): stop.
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                if ((e = tabAt(t, index)) != null && e.hash < 0) {
                    // Negative hash: either a TreeBin (traverse its linked
                    // node list) or a forwarding node (switch to new table
                    // and retry this bin).
                    if ((ek = e.key) instanceof TreeBin)
                        e = ((TreeBin<K,V>)ek).first;
                    else {
                        tab = (Node<K,V>[])ek;
                        e = null;
                        continue;
                    }
                }
                // After a resize, the same base bin also appears at
                // index + baseSize in the larger table; visit those too.
                if ((index += baseSize) >= n)
                    index = ++baseIndex;    // visit upper slots if present
            }
        }
    }
2281
    /**
     * Base of key, value, and entry Iterators. Adds fields to
     * Traverser to support iterator.remove
     */
    static class BaseIterator<K,V> extends Traverser<K,V> {
        final ConcurrentHashMap<K,V> map; // backing map, needed by remove()
        Node<K,V> lastReturned;           // node from last next(); null if none or removed
        BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
                    ConcurrentHashMap<K,V> map) {
            super(tab, size, index, limit);
            this.map = map;
            advance(); // pre-position on first element so hasNext() works
        }

        public final boolean hasNext() { return next != null; }
        public final boolean hasMoreElements() { return next != null; }

        /**
         * Removes the last element returned by next().
         *
         * @throws IllegalStateException if next() has not been called,
         * or remove() was already invoked since the last next()
         */
        public final void remove() {
            Node<K,V> p;
            if ((p = lastReturned) == null)
                throw new IllegalStateException();
            lastReturned = null;
            map.internalReplace((K)p.key, null, null); // null value => remove mapping
        }
    }
2307
2308 static final class KeyIterator<K,V> extends BaseIterator<K,V>
2309 implements Iterator<K>, Enumeration<K> {
2310 KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
2311 ConcurrentHashMap<K,V> map) {
2312 super(tab, index, size, limit, map);
2313 }
2314
2315 public final K next() {
2316 Node<K,V> p;
2317 if ((p = next) == null)
2318 throw new NoSuchElementException();
2319 K k = (K)p.key;
2320 lastReturned = p;
2321 advance();
2322 return k;
2323 }
2324
2325 public final K nextElement() { return next(); }
2326 }
2327
2328 static final class ValueIterator<K,V> extends BaseIterator<K,V>
2329 implements Iterator<V>, Enumeration<V> {
2330 ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
2331 ConcurrentHashMap<K,V> map) {
2332 super(tab, index, size, limit, map);
2333 }
2334
2335 public final V next() {
2336 Node<K,V> p;
2337 if ((p = next) == null)
2338 throw new NoSuchElementException();
2339 V v = p.val;
2340 lastReturned = p;
2341 advance();
2342 return v;
2343 }
2344
2345 public final V nextElement() { return next(); }
2346 }
2347
2348 static final class EntryIterator<K,V> extends BaseIterator<K,V>
2349 implements Iterator<Map.Entry<K,V>> {
2350 EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
2351 ConcurrentHashMap<K,V> map) {
2352 super(tab, index, size, limit, map);
2353 }
2354
2355 public final Map.Entry<K,V> next() {
2356 Node<K,V> p;
2357 if ((p = next) == null)
2358 throw new NoSuchElementException();
2359 K k = (K)p.key;
2360 V v = p.val;
2361 lastReturned = p;
2362 advance();
2363 return new MapEntry<K,V>(k, v, map);
2364 }
2365 }
2366
2367 static final class KeySpliterator<K,V> extends Traverser<K,V>
2368 implements Spliterator<K> {
2369 long est; // size estimate
2370 KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
2371 long est) {
2372 super(tab, size, index, limit);
2373 this.est = est;
2374 }
2375
2376 public Spliterator<K> trySplit() {
2377 int i, f, h;
2378 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
2379 new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
2380 f, est >>>= 1);
2381 }
2382
2383 public void forEachRemaining(Consumer<? super K> action) {
2384 if (action == null) throw new NullPointerException();
2385 for (Node<K,V> p; (p = advance()) != null;)
2386 action.accept((K)p.key);
2387 }
2388
2389 public boolean tryAdvance(Consumer<? super K> action) {
2390 if (action == null) throw new NullPointerException();
2391 Node<K,V> p;
2392 if ((p = advance()) == null)
2393 return false;
2394 action.accept((K)p.key);
2395 return true;
2396 }
2397
2398 public long estimateSize() { return est; }
2399
2400 public int characteristics() {
2401 return Spliterator.DISTINCT | Spliterator.CONCURRENT |
2402 Spliterator.NONNULL;
2403 }
2404 }
2405
2406 static final class ValueSpliterator<K,V> extends Traverser<K,V>
2407 implements Spliterator<V> {
2408 long est; // size estimate
2409 ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
2410 long est) {
2411 super(tab, size, index, limit);
2412 this.est = est;
2413 }
2414
2415 public Spliterator<V> trySplit() {
2416 int i, f, h;
2417 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
2418 new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
2419 f, est >>>= 1);
2420 }
2421
2422 public void forEachRemaining(Consumer<? super V> action) {
2423 if (action == null) throw new NullPointerException();
2424 for (Node<K,V> p; (p = advance()) != null;)
2425 action.accept(p.val);
2426 }
2427
2428 public boolean tryAdvance(Consumer<? super V> action) {
2429 if (action == null) throw new NullPointerException();
2430 Node<K,V> p;
2431 if ((p = advance()) == null)
2432 return false;
2433 action.accept(p.val);
2434 return true;
2435 }
2436
2437 public long estimateSize() { return est; }
2438
2439 public int characteristics() {
2440 return Spliterator.CONCURRENT | Spliterator.NONNULL;
2441 }
2442 }
2443
2444 static final class EntrySpliterator<K,V> extends Traverser<K,V>
2445 implements Spliterator<Map.Entry<K,V>> {
2446 final ConcurrentHashMap<K,V> map; // To export MapEntry
2447 long est; // size estimate
2448 EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
2449 long est, ConcurrentHashMap<K,V> map) {
2450 super(tab, size, index, limit);
2451 this.map = map;
2452 this.est = est;
2453 }
2454
2455 public Spliterator<Map.Entry<K,V>> trySplit() {
2456 int i, f, h;
2457 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
2458 new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
2459 f, est >>>= 1, map);
2460 }
2461
2462 public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
2463 if (action == null) throw new NullPointerException();
2464 for (Node<K,V> p; (p = advance()) != null; )
2465 action.accept(new MapEntry<K,V>((K)p.key, p.val, map));
2466 }
2467
2468 public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
2469 if (action == null) throw new NullPointerException();
2470 Node<K,V> p;
2471 if ((p = advance()) == null)
2472 return false;
2473 action.accept(new MapEntry<K,V>((K)p.key, p.val, map));
2474 return true;
2475 }
2476
2477 public long estimateSize() { return est; }
2478
2479 public int characteristics() {
2480 return Spliterator.DISTINCT | Spliterator.CONCURRENT |
2481 Spliterator.NONNULL;
2482 }
2483 }
2484
2485
2486 /* ---------------- Public operations -------------- */
2487
    /**
     * Creates a new, empty map with the default initial table size (16).
     */
    public ConcurrentHashMap() {
        // Table allocation is deferred until first insertion (see initTable).
    }
2493
2494 /**
2495 * Creates a new, empty map with an initial table size
2496 * accommodating the specified number of elements without the need
2497 * to dynamically resize.
2498 *
2499 * @param initialCapacity The implementation performs internal
2500 * sizing to accommodate this many elements.
2501 * @throws IllegalArgumentException if the initial capacity of
2502 * elements is negative
2503 */
2504 public ConcurrentHashMap(int initialCapacity) {
2505 if (initialCapacity < 0)
2506 throw new IllegalArgumentException();
2507 int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
2508 MAXIMUM_CAPACITY :
2509 tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
2510 this.sizeCtl = cap;
2511 }
2512
    /**
     * Creates a new map with the same mappings as the given map.
     *
     * @param m the map
     */
    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
        this.sizeCtl = DEFAULT_CAPACITY; // start at default size; grows as needed
        internalPutAll(m);
    }
2522
    /**
     * Creates a new, empty map with an initial table size based on
     * the given number of elements ({@code initialCapacity}) and
     * initial table density ({@code loadFactor}).
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements,
     * given the specified load factor.
     * @param loadFactor the load factor (table density) for
     * establishing the initial table size
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative or the load factor is nonpositive
     *
     * @since 1.6
     */
    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
        this(initialCapacity, loadFactor, 1); // concurrencyLevel 1: sizing hint only
    }
2541
2542 /**
2543 * Creates a new, empty map with an initial table size based on
2544 * the given number of elements ({@code initialCapacity}), table
2545 * density ({@code loadFactor}), and number of concurrently
2546 * updating threads ({@code concurrencyLevel}).
2547 *
2548 * @param initialCapacity the initial capacity. The implementation
2549 * performs internal sizing to accommodate this many elements,
2550 * given the specified load factor.
2551 * @param loadFactor the load factor (table density) for
2552 * establishing the initial table size
2553 * @param concurrencyLevel the estimated number of concurrently
2554 * updating threads. The implementation may use this value as
2555 * a sizing hint.
2556 * @throws IllegalArgumentException if the initial capacity is
2557 * negative or the load factor or concurrencyLevel are
2558 * nonpositive
2559 */
2560 public ConcurrentHashMap(int initialCapacity,
2561 float loadFactor, int concurrencyLevel) {
2562 if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
2563 throw new IllegalArgumentException();
2564 if (initialCapacity < concurrencyLevel) // Use at least as many bins
2565 initialCapacity = concurrencyLevel; // as estimated threads
2566 long size = (long)(1.0 + (long)initialCapacity / loadFactor);
2567 int cap = (size >= (long)MAXIMUM_CAPACITY) ?
2568 MAXIMUM_CAPACITY : tableSizeFor((int)size);
2569 this.sizeCtl = cap;
2570 }
2571
    /**
     * Creates a new {@link Set} backed by a ConcurrentHashMap
     * from the given type to {@code Boolean.TRUE}.
     *
     * @return the new set
     */
    public static <K> KeySetView<K,Boolean> newKeySet() {
        // All members map to the shared Boolean.TRUE sentinel.
        return new KeySetView<K,Boolean>
            (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
    }
2582
    /**
     * Creates a new {@link Set} backed by a ConcurrentHashMap
     * from the given type to {@code Boolean.TRUE}.
     *
     * @param initialCapacity The implementation performs internal
     * sizing to accommodate this many elements.
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative
     * @return the new set
     */
    public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
        // All members map to the shared Boolean.TRUE sentinel.
        return new KeySetView<K,Boolean>
            (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
    }
2597
2598 /**
2599 * {@inheritDoc}
2600 */
2601 public boolean isEmpty() {
2602 return sumCount() <= 0L; // ignore transient negative values
2603 }
2604
2605 /**
2606 * {@inheritDoc}
2607 */
2608 public int size() {
2609 long n = sumCount();
2610 return ((n < 0L) ? 0 :
2611 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
2612 (int)n);
2613 }
2614
2615 /**
2616 * Returns the number of mappings. This method should be used
2617 * instead of {@link #size} because a ConcurrentHashMap may
2618 * contain more mappings than can be represented as an int. The
2619 * value returned is an estimate; the actual count may differ if
2620 * there are concurrent insertions or removals.
2621 *
2622 * @return the number of mappings
2623 */
2624 public long mappingCount() {
2625 long n = sumCount();
2626 return (n < 0L) ? 0L : n; // ignore transient negative values
2627 }
2628
    /**
     * Returns the value to which the specified key is mapped,
     * or {@code null} if this map contains no mapping for the key.
     *
     * <p>More formally, if this map contains a mapping from a key
     * {@code k} to a value {@code v} such that {@code key.equals(k)},
     * then this method returns {@code v}; otherwise it returns
     * {@code null}. (There can be at most one such mapping.)
     *
     * @throws NullPointerException if the specified key is null
     */
    public V get(Object key) {
        return internalGet(key); // all lookup logic lives in internalGet
    }
2643
2644 /**
2645 * Returns the value to which the specified key is mapped, or the
2646 * given default value if this map contains no mapping for the
2647 * key.
2648 *
2649 * @param key the key whose associated value is to be returned
2650 * @param defaultValue the value to return if this map contains
2651 * no mapping for the given key
2652 * @return the mapping for the key, if present; else the default value
2653 * @throws NullPointerException if the specified key is null
2654 */
2655 public V getOrDefault(Object key, V defaultValue) {
2656 V v;
2657 return (v = internalGet(key)) == null ? defaultValue : v;
2658 }
2659
    /**
     * Tests if the specified object is a key in this table.
     *
     * @param key possible key
     * @return {@code true} if and only if the specified object
     * is a key in this table, as determined by the
     * {@code equals} method; {@code false} otherwise
     * @throws NullPointerException if the specified key is null
     */
    public boolean containsKey(Object key) {
        // Values are never null, so a non-null result means key is present.
        return internalGet(key) != null;
    }
2672
2673 /**
2674 * Returns {@code true} if this map maps one or more keys to the
2675 * specified value. Note: This method may require a full traversal
2676 * of the map, and is much slower than method {@code containsKey}.
2677 *
2678 * @param value value whose presence in this map is to be tested
2679 * @return {@code true} if this map maps one or more keys to the
2680 * specified value
2681 * @throws NullPointerException if the specified value is null
2682 */
2683 public boolean containsValue(Object value) {
2684 if (value == null)
2685 throw new NullPointerException();
2686 Node<K,V>[] t;
2687 if ((t = table) != null) {
2688 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
2689 for (Node<K,V> p; (p = it.advance()) != null; ) {
2690 V v;
2691 if ((v = p.val) == value || value.equals(v))
2692 return true;
2693 }
2694 }
2695 return false;
2696 }
2697
    /**
     * Legacy method testing if some key maps into the specified value
     * in this table. This method is identical in functionality to
     * {@link #containsValue(Object)}, and exists solely to ensure
     * full compatibility with class {@link java.util.Hashtable},
     * which supported this method prior to introduction of the
     * Java Collections framework.
     *
     * @param value a value to search for
     * @return {@code true} if and only if some key maps to the
     * {@code value} argument in this table as
     * determined by the {@code equals} method;
     * {@code false} otherwise
     * @throws NullPointerException if the specified value is null
     */
    @Deprecated public boolean contains(Object value) {
        return containsValue(value); // retained only for Hashtable compatibility
    }
2716
    /**
     * Maps the specified key to the specified value in this table.
     * Neither the key nor the value can be null.
     *
     * <p>The value can be retrieved by calling the {@code get} method
     * with a key that is equal to the original key.
     *
     * @param key key with which the specified value is to be associated
     * @param value value to be associated with the specified key
     * @return the previous value associated with {@code key}, or
     * {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key or value is null
     */
    public V put(K key, V value) {
        return internalPut(key, value, false); // false: overwrite existing mapping
    }
2733
    /**
     * {@inheritDoc}
     *
     * @return the previous value associated with the specified key,
     * or {@code null} if there was no mapping for the key
     * @throws NullPointerException if the specified key or value is null
     */
    public V putIfAbsent(K key, V value) {
        return internalPut(key, value, true); // true: only put if absent
    }
2744
    /**
     * Copies all of the mappings from the specified map to this one.
     * These mappings replace any mappings that this map had for any of the
     * keys currently in the specified map.
     *
     * @param m mappings to be stored in this map
     */
    public void putAll(Map<? extends K, ? extends V> m) {
        internalPutAll(m); // internalPutAll also presizes for m's size
    }
2755
    /**
     * If the specified key is not already associated with a value,
     * attempts to compute its value using the given mapping function
     * and enters it into this map unless {@code null}. The entire
     * method invocation is performed atomically, so the function is
     * applied at most once per key. Some attempted update operations
     * on this map by other threads may be blocked while computation
     * is in progress, so the computation should be short and simple,
     * and must not attempt to update any other mappings of this map.
     *
     * @param key key with which the specified value is to be associated
     * @param mappingFunction the function to compute a value
     * @return the current (existing or computed) value associated with
     *         the specified key, or null if the computed value is null
     * @throws NullPointerException if the specified key or mappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the mappingFunction does so,
     *         in which case the mapping is left unestablished
     */
    public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
        return internalComputeIfAbsent(key, mappingFunction);
    }
2781
    /**
     * If the value for the specified key is present, attempts to
     * compute a new mapping given the key and its current mapped
     * value. The entire method invocation is performed atomically.
     * Some attempted update operations on this map by other threads
     * may be blocked while computation is in progress, so the
     * computation should be short and simple, and must not attempt to
     * update any other mappings of this map.
     *
     * @param key key with which a value may be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        return internalCompute(key, true, remappingFunction); // true: only if present
    }
2805
    /**
     * Attempts to compute a mapping for the specified key and its
     * current mapped value (or {@code null} if there is no current
     * mapping). The entire method invocation is performed atomically.
     * Some attempted update operations on this map by other threads
     * may be blocked while computation is in progress, so the
     * computation should be short and simple, and must not attempt to
     * update any other mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        return internalCompute(key, false, remappingFunction); // false: apply even if absent
    }
2829
    /**
     * If the specified key is not already associated with a
     * (non-null) value, associates it with the given value.
     * Otherwise, replaces the value with the results of the given
     * remapping function, or removes if {@code null}. The entire
     * method invocation is performed atomically. Some attempted
     * update operations on this map by other threads may be blocked
     * while computation is in progress, so the computation should be
     * short and simple, and must not attempt to update any other
     * mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param value the value to use if absent
     * @param remappingFunction the function to recompute a value if present
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or the
     *         remappingFunction is null
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
        return internalMerge(key, value, remappingFunction);
    }
2853
    /**
     * Removes the key (and its corresponding value) from this map.
     * This method does nothing if the key is not in the map.
     *
     * @param key the key that needs to be removed
     * @return the previous value associated with {@code key}, or
     * {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key is null
     */
    public V remove(Object key) {
        // internalReplace with null value and null condition removes the mapping.
        return internalReplace(key, null, null);
    }
2866
2867 /**
2868 * {@inheritDoc}
2869 *
2870 * @throws NullPointerException if the specified key is null
2871 */
2872 public boolean remove(Object key, Object value) {
2873 if (key == null)
2874 throw new NullPointerException();
2875 return value != null && internalReplace(key, null, value) != null;
2876 }
2877
2878 /**
2879 * {@inheritDoc}
2880 *
2881 * @throws NullPointerException if any of the arguments are null
2882 */
2883 public boolean replace(K key, V oldValue, V newValue) {
2884 if (key == null || oldValue == null || newValue == null)
2885 throw new NullPointerException();
2886 return internalReplace(key, newValue, oldValue) != null;
2887 }
2888
2889 /**
2890 * {@inheritDoc}
2891 *
2892 * @return the previous value associated with the specified key,
2893 * or {@code null} if there was no mapping for the key
2894 * @throws NullPointerException if the specified key or value is null
2895 */
2896 public V replace(K key, V value) {
2897 if (key == null || value == null)
2898 throw new NullPointerException();
2899 return internalReplace(key, value, null);
2900 }
2901
    /**
     * Removes all of the mappings from this map.
     */
    public void clear() {
        // Delegates to the internal table-clearing routine.
        internalClear();
    }
2908
2909 /**
2910 * Returns a {@link Set} view of the keys contained in this map.
2911 * The set is backed by the map, so changes to the map are
2912 * reflected in the set, and vice-versa. The set supports element
2913 * removal, which removes the corresponding mapping from this map,
2914 * via the {@code Iterator.remove}, {@code Set.remove},
2915 * {@code removeAll}, {@code retainAll}, and {@code clear}
2916 * operations. It does not support the {@code add} or
2917 * {@code addAll} operations.
2918 *
2919 * <p>The view's {@code iterator} is a "weakly consistent" iterator
2920 * that will never throw {@link ConcurrentModificationException},
2921 * and guarantees to traverse elements as they existed upon
2922 * construction of the iterator, and may (but is not guaranteed to)
2923 * reflect any modifications subsequent to construction.
2924 *
2925 * @return the set view
2926 */
2927 public KeySetView<K,V> keySet() {
2928 KeySetView<K,V> ks = keySet;
2929 return (ks != null) ? ks : (keySet = new KeySetView<K,V>(this, null));
2930 }
2931
2932 /**
2933 * Returns a {@link Set} view of the keys in this map, using the
2934 * given common mapped value for any additions (i.e., {@link
2935 * Collection#add} and {@link Collection#addAll(Collection)}).
2936 * This is of course only appropriate if it is acceptable to use
2937 * the same value for all additions from this view.
2938 *
2939 * @param mappedValue the mapped value to use for any additions
2940 * @return the set view
2941 * @throws NullPointerException if the mappedValue is null
2942 */
2943 public KeySetView<K,V> keySet(V mappedValue) {
2944 if (mappedValue == null)
2945 throw new NullPointerException();
2946 return new KeySetView<K,V>(this, mappedValue);
2947 }
2948
2949 /**
2950 * Returns a {@link Collection} view of the values contained in this map.
2951 * The collection is backed by the map, so changes to the map are
2952 * reflected in the collection, and vice-versa. The collection
2953 * supports element removal, which removes the corresponding
2954 * mapping from this map, via the {@code Iterator.remove},
2955 * {@code Collection.remove}, {@code removeAll},
2956 * {@code retainAll}, and {@code clear} operations. It does not
2957 * support the {@code add} or {@code addAll} operations.
2958 *
2959 * <p>The view's {@code iterator} is a "weakly consistent" iterator
2960 * that will never throw {@link ConcurrentModificationException},
2961 * and guarantees to traverse elements as they existed upon
2962 * construction of the iterator, and may (but is not guaranteed to)
2963 * reflect any modifications subsequent to construction.
2964 *
2965 * @return the collection view
2966 */
2967 public Collection<V> values() {
2968 ValuesView<K,V> vs = values;
2969 return (vs != null) ? vs : (values = new ValuesView<K,V>(this));
2970 }
2971
2972 /**
2973 * Returns a {@link Set} view of the mappings contained in this map.
2974 * The set is backed by the map, so changes to the map are
2975 * reflected in the set, and vice-versa. The set supports element
2976 * removal, which removes the corresponding mapping from the map,
2977 * via the {@code Iterator.remove}, {@code Set.remove},
2978 * {@code removeAll}, {@code retainAll}, and {@code clear}
2979 * operations.
2980 *
2981 * <p>The view's {@code iterator} is a "weakly consistent" iterator
2982 * that will never throw {@link ConcurrentModificationException},
2983 * and guarantees to traverse elements as they existed upon
2984 * construction of the iterator, and may (but is not guaranteed to)
2985 * reflect any modifications subsequent to construction.
2986 *
2987 * @return the set view
2988 */
2989 public Set<Map.Entry<K,V>> entrySet() {
2990 EntrySetView<K,V> es = entrySet;
2991 return (es != null) ? es : (entrySet = new EntrySetView<K,V>(this));
2992 }
2993
2994 /**
2995 * Returns an enumeration of the keys in this table.
2996 *
2997 * @return an enumeration of the keys in this table
2998 * @see #keySet()
2999 */
3000 public Enumeration<K> keys() {
3001 Node<K,V>[] t;
3002 int f = (t = table) == null ? 0 : t.length;
3003 return new KeyIterator<K,V>(t, f, 0, f, this);
3004 }
3005
3006 /**
3007 * Returns an enumeration of the values in this table.
3008 *
3009 * @return an enumeration of the values in this table
3010 * @see #values()
3011 */
3012 public Enumeration<V> elements() {
3013 Node<K,V>[] t;
3014 int f = (t = table) == null ? 0 : t.length;
3015 return new ValueIterator<K,V>(t, f, 0, f, this);
3016 }
3017
3018 /**
3019 * Returns the hash code value for this {@link Map}, i.e.,
3020 * the sum of, for each key-value pair in the map,
3021 * {@code key.hashCode() ^ value.hashCode()}.
3022 *
3023 * @return the hash code value for this map
3024 */
3025 public int hashCode() {
3026 int h = 0;
3027 Node<K,V>[] t;
3028 if ((t = table) != null) {
3029 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
3030 for (Node<K,V> p; (p = it.advance()) != null; )
3031 h += p.key.hashCode() ^ p.val.hashCode();
3032 }
3033 return h;
3034 }
3035
3036 /**
3037 * Returns a string representation of this map. The string
3038 * representation consists of a list of key-value mappings (in no
3039 * particular order) enclosed in braces ("{@code {}}"). Adjacent
3040 * mappings are separated by the characters {@code ", "} (comma
3041 * and space). Each key-value mapping is rendered as the key
3042 * followed by an equals sign ("{@code =}") followed by the
3043 * associated value.
3044 *
3045 * @return a string representation of this map
3046 */
3047 public String toString() {
3048 Node<K,V>[] t;
3049 int f = (t = table) == null ? 0 : t.length;
3050 Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
3051 StringBuilder sb = new StringBuilder();
3052 sb.append('{');
3053 Node<K,V> p;
3054 if ((p = it.advance()) != null) {
3055 for (;;) {
3056 K k = (K)p.key;
3057 V v = p.val;
3058 sb.append(k == this ? "(this Map)" : k);
3059 sb.append('=');
3060 sb.append(v == this ? "(this Map)" : v);
3061 if ((p = it.advance()) == null)
3062 break;
3063 sb.append(',').append(' ');
3064 }
3065 }
3066 return sb.append('}').toString();
3067 }
3068
3069 /**
3070 * Compares the specified object with this map for equality.
3071 * Returns {@code true} if the given object is a map with the same
3072 * mappings as this map. This operation may return misleading
3073 * results if either map is concurrently modified during execution
3074 * of this method.
3075 *
3076 * @param o object to be compared for equality with this map
3077 * @return {@code true} if the specified object is equal to this map
3078 */
3079 public boolean equals(Object o) {
3080 if (o != this) {
3081 if (!(o instanceof Map))
3082 return false;
3083 Map<?,?> m = (Map<?,?>) o;
3084 Node<K,V>[] t;
3085 int f = (t = table) == null ? 0 : t.length;
3086 Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
3087 for (Node<K,V> p; (p = it.advance()) != null; ) {
3088 V val = p.val;
3089 Object v = m.get(p.key);
3090 if (v == null || (v != val && !v.equals(val)))
3091 return false;
3092 }
3093 for (Map.Entry<?,?> e : m.entrySet()) {
3094 Object mk, mv, v;
3095 if ((mk = e.getKey()) == null ||
3096 (mv = e.getValue()) == null ||
3097 (v = internalGet(mk)) == null ||
3098 (mv != v && !mv.equals(v)))
3099 return false;
3100 }
3101 }
3102 return true;
3103 }
3104
3105 /* ---------------- Serialization Support -------------- */
3106
    /**
     * Stripped-down version of helper class used in previous version,
     * declared for the sake of serialization compatibility.
     */
    static class Segment<K,V> extends ReentrantLock implements Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        // Retained only so the serialized form matches older releases.
        final float loadFactor;
        Segment(float lf) { this.loadFactor = lf; }
    }
3116
    /**
     * Saves the state of the {@code ConcurrentHashMap} instance to a
     * stream (i.e., serializes it).
     * @param s the stream
     * @throws java.io.IOException if an I/O error occurs
     * @serialData
     * the key (Object) and value (Object)
     * for each key-value mapping, followed by a null pair.
     * The key-value mappings are emitted in no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        // For serialization compatibility
        // Emulate segment calculation from previous version of this class
        int sshift = 0;
        int ssize = 1;
        // ssize becomes the smallest power of two >= DEFAULT_CONCURRENCY_LEVEL;
        // sshift is its base-2 logarithm.
        while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
            ++sshift;
            ssize <<= 1;
        }
        int segmentShift = 32 - sshift;
        int segmentMask = ssize - 1;
        // Dummy segments, written only so older readers of the stream
        // find the fields they expect.
        Segment<K,V>[] segments = (Segment<K,V>[])
            new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
        for (int i = 0; i < segments.length; ++i)
            segments[i] = new Segment<K,V>(LOAD_FACTOR);
        s.putFields().put("segments", segments);
        s.putFields().put("segmentShift", segmentShift);
        s.putFields().put("segmentMask", segmentMask);
        s.writeFields();

        // Emit each key-value pair in traversal order, then a
        // (null, null) pair as the end-of-data marker (see readObject).
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                s.writeObject(p.key);
                s.writeObject(p.val);
            }
        }
        s.writeObject(null);
        s.writeObject(null);
        segments = null; // throw away
    }
3159
    /**
     * Reconstitutes the instance from a stream (that is, deserializes it).
     * @param s the stream
     * @throws java.io.IOException if an I/O error occurs
     * @throws ClassNotFoundException if a serialized class cannot be found
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        s.defaultReadObject();

        // Create all nodes, then place in table once size is known
        long size = 0L;
        Node<K,V> p = null;
        for (;;) {
            // The stream ends with a (null, null) pair; see writeObject.
            K k = (K) s.readObject();
            V v = (V) s.readObject();
            if (k != null && v != null) {
                int h = spread(k.hashCode());
                p = new Node<K,V>(h, k, v, p); // prepend to temporary list
                ++size;
            }
            else
                break;
        }
        if (p != null) {
            boolean init = false;
            int n;
            // Pick a capacity of roughly 1.5x the element count,
            // rounded up via tableSizeFor, capped at MAXIMUM_CAPACITY.
            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
                n = MAXIMUM_CAPACITY;
            else {
                int sz = (int)size;
                n = tableSizeFor(sz + (sz >>> 1) + 1);
            }
            int sc = sizeCtl;
            boolean collide = false;
            // CAS sizeCtl to -1 to claim the right to initialize the table.
            if (n > sc &&
                U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                try {
                    if (table == null) {
                        init = true;
                        Node<K,V>[] tab = (Node<K,V>[])new Node[n];
                        int mask = n - 1;
                        // Distribute the prebuilt nodes into bins, noting
                        // whether any bin received two equal-hash keys.
                        while (p != null) {
                            int j = p.hash & mask;
                            Node<K,V> next = p.next;
                            Node<K,V> q = p.next = tabAt(tab, j);
                            setTabAt(tab, j, p);
                            if (!collide && q != null && q.hash == p.hash)
                                collide = true;
                            p = next;
                        }
                        table = tab;
                        addCount(size, -1);
                        sc = n - (n >>> 2); // next resize threshold: 0.75 * n
                    }
                } finally {
                    sizeCtl = sc; // release initialization lock
                }
                if (collide) { // rescan and convert to TreeBins
                    Node<K,V>[] tab = table;
                    for (int i = 0; i < tab.length; ++i) {
                        int c = 0;
                        for (Node<K,V> e = tabAt(tab, i); e != null; e = e.next) {
                            if (++c > TREE_THRESHOLD &&
                                (e.key instanceof Comparable)) {
                                replaceWithTreeBin(tab, i, e.key);
                                break;
                            }
                        }
                    }
                }
            }
            if (!init) { // Can only happen if unsafely published.
                // Fall back to ordinary puts of the remaining nodes.
                while (p != null) {
                    internalPut((K)p.key, p.val, false);
                    p = p.next;
                }
            }
        }
    }
3238
3239 // -------------------------------------------------------
3240
3241 // Overrides of other default Map methods
3242
3243 public void forEach(BiConsumer<? super K, ? super V> action) {
3244 if (action == null) throw new NullPointerException();
3245 Node<K,V>[] t;
3246 if ((t = table) != null) {
3247 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
3248 for (Node<K,V> p; (p = it.advance()) != null; ) {
3249 action.accept((K)p.key, p.val);
3250 }
3251 }
3252 }
3253
3254 public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
3255 if (function == null) throw new NullPointerException();
3256 Node<K,V>[] t;
3257 if ((t = table) != null) {
3258 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
3259 for (Node<K,V> p; (p = it.advance()) != null; ) {
3260 K k = (K)p.key;
3261 internalPut(k, function.apply(k, p.val), false);
3262 }
3263 }
3264 }
3265
3266 // -------------------------------------------------------
3267
3268 // Parallel bulk operations
3269
    /**
     * Computes initial batch value for bulk tasks. The returned value
     * is approximately exp2 of the number of times (minus one) to
     * split task by two before executing leaf action. This value is
     * faster to compute and more convenient to use as a guide to
     * splitting than is the depth, since it is used while dividing by
     * two anyway.
     */
    final int batchFor(long b) {
        long n;
        // Run sequentially (batch 0) when the caller asked for no
        // parallelism (MAX_VALUE threshold), the map has at most one
        // element, or it holds fewer elements than the threshold.
        if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
            return 0;
        int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
        // Cap at 4x common-pool parallelism; a non-positive threshold
        // also maps to the cap.
        return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
    }
3285
3286 /**
3287 * Performs the given action for each (key, value).
3288 *
3289 * @param parallelismThreshold the (estimated) number of elements
3290 * needed for this operation to be executed in parallel
3291 * @param action the action
3292 */
3293 public void forEach(long parallelismThreshold,
3294 BiConsumer<? super K,? super V> action) {
3295 if (action == null) throw new NullPointerException();
3296 new ForEachMappingTask<K,V>
3297 (null, batchFor(parallelismThreshold), 0, 0, table,
3298 action).invoke();
3299 }
3300
3301 /**
3302 * Performs the given action for each non-null transformation
3303 * of each (key, value).
3304 *
3305 * @param parallelismThreshold the (estimated) number of elements
3306 * needed for this operation to be executed in parallel
3307 * @param transformer a function returning the transformation
3308 * for an element, or null if there is no transformation (in
3309 * which case the action is not applied)
3310 * @param action the action
3311 */
3312 public <U> void forEach(long parallelismThreshold,
3313 BiFunction<? super K, ? super V, ? extends U> transformer,
3314 Consumer<? super U> action) {
3315 if (transformer == null || action == null)
3316 throw new NullPointerException();
3317 new ForEachTransformedMappingTask<K,V,U>
3318 (null, batchFor(parallelismThreshold), 0, 0, table,
3319 transformer, action).invoke();
3320 }
3321
3322 /**
3323 * Returns a non-null result from applying the given search
3324 * function on each (key, value), or null if none. Upon
3325 * success, further element processing is suppressed and the
3326 * results of any other parallel invocations of the search
3327 * function are ignored.
3328 *
3329 * @param parallelismThreshold the (estimated) number of elements
3330 * needed for this operation to be executed in parallel
3331 * @param searchFunction a function returning a non-null
3332 * result on success, else null
3333 * @return a non-null result from applying the given search
3334 * function on each (key, value), or null if none
3335 */
3336 public <U> U search(long parallelismThreshold,
3337 BiFunction<? super K, ? super V, ? extends U> searchFunction) {
3338 if (searchFunction == null) throw new NullPointerException();
3339 return new SearchMappingsTask<K,V,U>
3340 (null, batchFor(parallelismThreshold), 0, 0, table,
3341 searchFunction, new AtomicReference<U>()).invoke();
3342 }
3343
3344 /**
3345 * Returns the result of accumulating the given transformation
3346 * of all (key, value) pairs using the given reducer to
3347 * combine values, or null if none.
3348 *
3349 * @param parallelismThreshold the (estimated) number of elements
3350 * needed for this operation to be executed in parallel
3351 * @param transformer a function returning the transformation
3352 * for an element, or null if there is no transformation (in
3353 * which case it is not combined)
3354 * @param reducer a commutative associative combining function
3355 * @return the result of accumulating the given transformation
3356 * of all (key, value) pairs
3357 */
3358 public <U> U reduce(long parallelismThreshold,
3359 BiFunction<? super K, ? super V, ? extends U> transformer,
3360 BiFunction<? super U, ? super U, ? extends U> reducer) {
3361 if (transformer == null || reducer == null)
3362 throw new NullPointerException();
3363 return new MapReduceMappingsTask<K,V,U>
3364 (null, batchFor(parallelismThreshold), 0, 0, table,
3365 null, transformer, reducer).invoke();
3366 }
3367
3368 /**
3369 * Returns the result of accumulating the given transformation
3370 * of all (key, value) pairs using the given reducer to
3371 * combine values, and the given basis as an identity value.
3372 *
3373 * @param parallelismThreshold the (estimated) number of elements
3374 * needed for this operation to be executed in parallel
3375 * @param transformer a function returning the transformation
3376 * for an element
3377 * @param basis the identity (initial default value) for the reduction
3378 * @param reducer a commutative associative combining function
3379 * @return the result of accumulating the given transformation
3380 * of all (key, value) pairs
3381 */
3382 public double reduceToDoubleIn(long parallelismThreshold,
3383 ToDoubleBiFunction<? super K, ? super V> transformer,
3384 double basis,
3385 DoubleBinaryOperator reducer) {
3386 if (transformer == null || reducer == null)
3387 throw new NullPointerException();
3388 return new MapReduceMappingsToDoubleTask<K,V>
3389 (null, batchFor(parallelismThreshold), 0, 0, table,
3390 null, transformer, basis, reducer).invoke();
3391 }
3392
3393 /**
3394 * Returns the result of accumulating the given transformation
3395 * of all (key, value) pairs using the given reducer to
3396 * combine values, and the given basis as an identity value.
3397 *
3398 * @param parallelismThreshold the (estimated) number of elements
3399 * needed for this operation to be executed in parallel
3400 * @param transformer a function returning the transformation
3401 * for an element
3402 * @param basis the identity (initial default value) for the reduction
3403 * @param reducer a commutative associative combining function
3404 * @return the result of accumulating the given transformation
3405 * of all (key, value) pairs
3406 */
3407 public long reduceToLong(long parallelismThreshold,
3408 ToLongBiFunction<? super K, ? super V> transformer,
3409 long basis,
3410 LongBinaryOperator reducer) {
3411 if (transformer == null || reducer == null)
3412 throw new NullPointerException();
3413 return new MapReduceMappingsToLongTask<K,V>
3414 (null, batchFor(parallelismThreshold), 0, 0, table,
3415 null, transformer, basis, reducer).invoke();
3416 }
3417
3418 /**
3419 * Returns the result of accumulating the given transformation
3420 * of all (key, value) pairs using the given reducer to
3421 * combine values, and the given basis as an identity value.
3422 *
3423 * @param parallelismThreshold the (estimated) number of elements
3424 * needed for this operation to be executed in parallel
3425 * @param transformer a function returning the transformation
3426 * for an element
3427 * @param basis the identity (initial default value) for the reduction
3428 * @param reducer a commutative associative combining function
3429 * @return the result of accumulating the given transformation
3430 * of all (key, value) pairs
3431 */
3432 public int reduceToInt(long parallelismThreshold,
3433 ToIntBiFunction<? super K, ? super V> transformer,
3434 int basis,
3435 IntBinaryOperator reducer) {
3436 if (transformer == null || reducer == null)
3437 throw new NullPointerException();
3438 return new MapReduceMappingsToIntTask<K,V>
3439 (null, batchFor(parallelismThreshold), 0, 0, table,
3440 null, transformer, basis, reducer).invoke();
3441 }
3442
3443 /**
3444 * Performs the given action for each key.
3445 *
3446 * @param parallelismThreshold the (estimated) number of elements
3447 * needed for this operation to be executed in parallel
3448 * @param action the action
3449 */
3450 public void forEachKey(long parallelismThreshold,
3451 Consumer<? super K> action) {
3452 if (action == null) throw new NullPointerException();
3453 new ForEachKeyTask<K,V>
3454 (null, batchFor(parallelismThreshold), 0, 0, table,
3455 action).invoke();
3456 }
3457
3458 /**
3459 * Performs the given action for each non-null transformation
3460 * of each key.
3461 *
3462 * @param parallelismThreshold the (estimated) number of elements
3463 * needed for this operation to be executed in parallel
3464 * @param transformer a function returning the transformation
3465 * for an element, or null if there is no transformation (in
3466 * which case the action is not applied)
3467 * @param action the action
3468 */
3469 public <U> void forEachKey(long parallelismThreshold,
3470 Function<? super K, ? extends U> transformer,
3471 Consumer<? super U> action) {
3472 if (transformer == null || action == null)
3473 throw new NullPointerException();
3474 new ForEachTransformedKeyTask<K,V,U>
3475 (null, batchFor(parallelismThreshold), 0, 0, table,
3476 transformer, action).invoke();
3477 }
3478
3479 /**
3480 * Returns a non-null result from applying the given search
3481 * function on each key, or null if none. Upon success,
3482 * further element processing is suppressed and the results of
3483 * any other parallel invocations of the search function are
3484 * ignored.
3485 *
3486 * @param parallelismThreshold the (estimated) number of elements
3487 * needed for this operation to be executed in parallel
3488 * @param searchFunction a function returning a non-null
3489 * result on success, else null
3490 * @return a non-null result from applying the given search
3491 * function on each key, or null if none
3492 */
3493 public <U> U searchKeys(long parallelismThreshold,
3494 Function<? super K, ? extends U> searchFunction) {
3495 if (searchFunction == null) throw new NullPointerException();
3496 return new SearchKeysTask<K,V,U>
3497 (null, batchFor(parallelismThreshold), 0, 0, table,
3498 searchFunction, new AtomicReference<U>()).invoke();
3499 }
3500
3501 /**
3502 * Returns the result of accumulating all keys using the given
3503 * reducer to combine values, or null if none.
3504 *
3505 * @param parallelismThreshold the (estimated) number of elements
3506 * needed for this operation to be executed in parallel
3507 * @param reducer a commutative associative combining function
3508 * @return the result of accumulating all keys using the given
3509 * reducer to combine values, or null if none
3510 */
3511 public K reduceKeys(long parallelismThreshold,
3512 BiFunction<? super K, ? super K, ? extends K> reducer) {
3513 if (reducer == null) throw new NullPointerException();
3514 return new ReduceKeysTask<K,V>
3515 (null, batchFor(parallelismThreshold), 0, 0, table,
3516 null, reducer).invoke();
3517 }
3518
3519 /**
3520 * Returns the result of accumulating the given transformation
3521 * of all keys using the given reducer to combine values, or
3522 * null if none.
3523 *
3524 * @param parallelismThreshold the (estimated) number of elements
3525 * needed for this operation to be executed in parallel
3526 * @param transformer a function returning the transformation
3527 * for an element, or null if there is no transformation (in
3528 * which case it is not combined)
3529 * @param reducer a commutative associative combining function
3530 * @return the result of accumulating the given transformation
3531 * of all keys
3532 */
3533 public <U> U reduceKeys(long parallelismThreshold,
3534 Function<? super K, ? extends U> transformer,
3535 BiFunction<? super U, ? super U, ? extends U> reducer) {
3536 if (transformer == null || reducer == null)
3537 throw new NullPointerException();
3538 return new MapReduceKeysTask<K,V,U>
3539 (null, batchFor(parallelismThreshold), 0, 0, table,
3540 null, transformer, reducer).invoke();
3541 }
3542
3543 /**
3544 * Returns the result of accumulating the given transformation
3545 * of all keys using the given reducer to combine values, and
3546 * the given basis as an identity value.
3547 *
3548 * @param parallelismThreshold the (estimated) number of elements
3549 * needed for this operation to be executed in parallel
3550 * @param transformer a function returning the transformation
3551 * for an element
3552 * @param basis the identity (initial default value) for the reduction
3553 * @param reducer a commutative associative combining function
3554 * @return the result of accumulating the given transformation
3555 * of all keys
3556 */
3557 public double reduceKeysToDouble(long parallelismThreshold,
3558 ToDoubleFunction<? super K> transformer,
3559 double basis,
3560 DoubleBinaryOperator reducer) {
3561 if (transformer == null || reducer == null)
3562 throw new NullPointerException();
3563 return new MapReduceKeysToDoubleTask<K,V>
3564 (null, batchFor(parallelismThreshold), 0, 0, table,
3565 null, transformer, basis, reducer).invoke();
3566 }
3567
3568 /**
3569 * Returns the result of accumulating the given transformation
3570 * of all keys using the given reducer to combine values, and
3571 * the given basis as an identity value.
3572 *
3573 * @param parallelismThreshold the (estimated) number of elements
3574 * needed for this operation to be executed in parallel
3575 * @param transformer a function returning the transformation
3576 * for an element
3577 * @param basis the identity (initial default value) for the reduction
3578 * @param reducer a commutative associative combining function
3579 * @return the result of accumulating the given transformation
3580 * of all keys
3581 */
3582 public long reduceKeysToLong(long parallelismThreshold,
3583 ToLongFunction<? super K> transformer,
3584 long basis,
3585 LongBinaryOperator reducer) {
3586 if (transformer == null || reducer == null)
3587 throw new NullPointerException();
3588 return new MapReduceKeysToLongTask<K,V>
3589 (null, batchFor(parallelismThreshold), 0, 0, table,
3590 null, transformer, basis, reducer).invoke();
3591 }
3592
3593 /**
3594 * Returns the result of accumulating the given transformation
3595 * of all keys using the given reducer to combine values, and
3596 * the given basis as an identity value.
3597 *
3598 * @param parallelismThreshold the (estimated) number of elements
3599 * needed for this operation to be executed in parallel
3600 * @param transformer a function returning the transformation
3601 * for an element
3602 * @param basis the identity (initial default value) for the reduction
3603 * @param reducer a commutative associative combining function
3604 * @return the result of accumulating the given transformation
3605 * of all keys
3606 */
3607 public int reduceKeysToInt(long parallelismThreshold,
3608 ToIntFunction<? super K> transformer,
3609 int basis,
3610 IntBinaryOperator reducer) {
3611 if (transformer == null || reducer == null)
3612 throw new NullPointerException();
3613 return new MapReduceKeysToIntTask<K,V>
3614 (null, batchFor(parallelismThreshold), 0, 0, table,
3615 null, transformer, basis, reducer).invoke();
3616 }
3617
3618 /**
3619 * Performs the given action for each value.
3620 *
3621 * @param parallelismThreshold the (estimated) number of elements
3622 * needed for this operation to be executed in parallel
3623 * @param action the action
3624 */
3625 public void forEachValue(long parallelismThreshold,
3626 Consumer<? super V> action) {
3627 if (action == null)
3628 throw new NullPointerException();
3629 new ForEachValueTask<K,V>
3630 (null, batchFor(parallelismThreshold), 0, 0, table,
3631 action).invoke();
3632 }
3633
3634 /**
3635 * Performs the given action for each non-null transformation
3636 * of each value.
3637 *
3638 * @param parallelismThreshold the (estimated) number of elements
3639 * needed for this operation to be executed in parallel
3640 * @param transformer a function returning the transformation
3641 * for an element, or null if there is no transformation (in
3642 * which case the action is not applied)
3643 * @param action the action
3644 */
3645 public <U> void forEachValue(long parallelismThreshold,
3646 Function<? super V, ? extends U> transformer,
3647 Consumer<? super U> action) {
3648 if (transformer == null || action == null)
3649 throw new NullPointerException();
3650 new ForEachTransformedValueTask<K,V,U>
3651 (null, batchFor(parallelismThreshold), 0, 0, table,
3652 transformer, action).invoke();
3653 }
3654
3655 /**
3656 * Returns a non-null result from applying the given search
3657 * function on each value, or null if none. Upon success,
3658 * further element processing is suppressed and the results of
3659 * any other parallel invocations of the search function are
3660 * ignored.
3661 *
3662 * @param parallelismThreshold the (estimated) number of elements
3663 * needed for this operation to be executed in parallel
3664 * @param searchFunction a function returning a non-null
3665 * result on success, else null
3666 * @return a non-null result from applying the given search
3667 * function on each value, or null if none
3668 */
3669 public <U> U searchValues(long parallelismThreshold,
3670 Function<? super V, ? extends U> searchFunction) {
3671 if (searchFunction == null) throw new NullPointerException();
3672 return new SearchValuesTask<K,V,U>
3673 (null, batchFor(parallelismThreshold), 0, 0, table,
3674 searchFunction, new AtomicReference<U>()).invoke();
3675 }
3676
3677 /**
3678 * Returns the result of accumulating all values using the
3679 * given reducer to combine values, or null if none.
3680 *
3681 * @param parallelismThreshold the (estimated) number of elements
3682 * needed for this operation to be executed in parallel
3683 * @param reducer a commutative associative combining function
3684 * @return the result of accumulating all values
3685 */
3686 public V reduceValues(long parallelismThreshold,
3687 BiFunction<? super V, ? super V, ? extends V> reducer) {
3688 if (reducer == null) throw new NullPointerException();
3689 return new ReduceValuesTask<K,V>
3690 (null, batchFor(parallelismThreshold), 0, 0, table,
3691 null, reducer).invoke();
3692 }
3693
3694 /**
3695 * Returns the result of accumulating the given transformation
3696 * of all values using the given reducer to combine values, or
3697 * null if none.
3698 *
3699 * @param parallelismThreshold the (estimated) number of elements
3700 * needed for this operation to be executed in parallel
3701 * @param transformer a function returning the transformation
3702 * for an element, or null if there is no transformation (in
3703 * which case it is not combined)
3704 * @param reducer a commutative associative combining function
3705 * @return the result of accumulating the given transformation
3706 * of all values
3707 */
3708 public <U> U reduceValues(long parallelismThreshold,
3709 Function<? super V, ? extends U> transformer,
3710 BiFunction<? super U, ? super U, ? extends U> reducer) {
3711 if (transformer == null || reducer == null)
3712 throw new NullPointerException();
3713 return new MapReduceValuesTask<K,V,U>
3714 (null, batchFor(parallelismThreshold), 0, 0, table,
3715 null, transformer, reducer).invoke();
3716 }
3717
3718 /**
3719 * Returns the result of accumulating the given transformation
3720 * of all values using the given reducer to combine values,
3721 * and the given basis as an identity value.
3722 *
3723 * @param parallelismThreshold the (estimated) number of elements
3724 * needed for this operation to be executed in parallel
3725 * @param transformer a function returning the transformation
3726 * for an element
3727 * @param basis the identity (initial default value) for the reduction
3728 * @param reducer a commutative associative combining function
3729 * @return the result of accumulating the given transformation
3730 * of all values
3731 */
3732 public double reduceValuesToDouble(long parallelismThreshold,
3733 ToDoubleFunction<? super V> transformer,
3734 double basis,
3735 DoubleBinaryOperator reducer) {
3736 if (transformer == null || reducer == null)
3737 throw new NullPointerException();
3738 return new MapReduceValuesToDoubleTask<K,V>
3739 (null, batchFor(parallelismThreshold), 0, 0, table,
3740 null, transformer, basis, reducer).invoke();
3741 }
3742
3743 /**
3744 * Returns the result of accumulating the given transformation
3745 * of all values using the given reducer to combine values,
3746 * and the given basis as an identity value.
3747 *
3748 * @param parallelismThreshold the (estimated) number of elements
3749 * needed for this operation to be executed in parallel
3750 * @param transformer a function returning the transformation
3751 * for an element
3752 * @param basis the identity (initial default value) for the reduction
3753 * @param reducer a commutative associative combining function
3754 * @return the result of accumulating the given transformation
3755 * of all values
3756 */
3757 public long reduceValuesToLong(long parallelismThreshold,
3758 ToLongFunction<? super V> transformer,
3759 long basis,
3760 LongBinaryOperator reducer) {
3761 if (transformer == null || reducer == null)
3762 throw new NullPointerException();
3763 return new MapReduceValuesToLongTask<K,V>
3764 (null, batchFor(parallelismThreshold), 0, 0, table,
3765 null, transformer, basis, reducer).invoke();
3766 }
3767
3768 /**
3769 * Returns the result of accumulating the given transformation
3770 * of all values using the given reducer to combine values,
3771 * and the given basis as an identity value.
3772 *
3773 * @param parallelismThreshold the (estimated) number of elements
3774 * needed for this operation to be executed in parallel
3775 * @param transformer a function returning the transformation
3776 * for an element
3777 * @param basis the identity (initial default value) for the reduction
3778 * @param reducer a commutative associative combining function
3779 * @return the result of accumulating the given transformation
3780 * of all values
3781 */
3782 public int reduceValuesToInt(long parallelismThreshold,
3783 ToIntFunction<? super V> transformer,
3784 int basis,
3785 IntBinaryOperator reducer) {
3786 if (transformer == null || reducer == null)
3787 throw new NullPointerException();
3788 return new MapReduceValuesToIntTask<K,V>
3789 (null, batchFor(parallelismThreshold), 0, 0, table,
3790 null, transformer, basis, reducer).invoke();
3791 }
3792
3793 /**
3794 * Performs the given action for each entry.
3795 *
3796 * @param parallelismThreshold the (estimated) number of elements
3797 * needed for this operation to be executed in parallel
3798 * @param action the action
3799 */
3800 public void forEachEntry(long parallelismThreshold,
3801 Consumer<? super Map.Entry<K,V>> action) {
3802 if (action == null) throw new NullPointerException();
3803 new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
3804 action).invoke();
3805 }
3806
3807 /**
3808 * Performs the given action for each non-null transformation
3809 * of each entry.
3810 *
3811 * @param parallelismThreshold the (estimated) number of elements
3812 * needed for this operation to be executed in parallel
3813 * @param transformer a function returning the transformation
3814 * for an element, or null if there is no transformation (in
3815 * which case the action is not applied)
3816 * @param action the action
3817 */
3818 public <U> void forEachEntry(long parallelismThreshold,
3819 Function<Map.Entry<K,V>, ? extends U> transformer,
3820 Consumer<? super U> action) {
3821 if (transformer == null || action == null)
3822 throw new NullPointerException();
3823 new ForEachTransformedEntryTask<K,V,U>
3824 (null, batchFor(parallelismThreshold), 0, 0, table,
3825 transformer, action).invoke();
3826 }
3827
3828 /**
3829 * Returns a non-null result from applying the given search
3830 * function on each entry, or null if none. Upon success,
3831 * further element processing is suppressed and the results of
3832 * any other parallel invocations of the search function are
3833 * ignored.
3834 *
3835 * @param parallelismThreshold the (estimated) number of elements
3836 * needed for this operation to be executed in parallel
3837 * @param searchFunction a function returning a non-null
3838 * result on success, else null
3839 * @return a non-null result from applying the given search
3840 * function on each entry, or null if none
3841 */
3842 public <U> U searchEntries(long parallelismThreshold,
3843 Function<Map.Entry<K,V>, ? extends U> searchFunction) {
3844 if (searchFunction == null) throw new NullPointerException();
3845 return new SearchEntriesTask<K,V,U>
3846 (null, batchFor(parallelismThreshold), 0, 0, table,
3847 searchFunction, new AtomicReference<U>()).invoke();
3848 }
3849
3850 /**
3851 * Returns the result of accumulating all entries using the
3852 * given reducer to combine values, or null if none.
3853 *
3854 * @param parallelismThreshold the (estimated) number of elements
3855 * needed for this operation to be executed in parallel
3856 * @param reducer a commutative associative combining function
3857 * @return the result of accumulating all entries
3858 */
3859 public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
3860 BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
3861 if (reducer == null) throw new NullPointerException();
3862 return new ReduceEntriesTask<K,V>
3863 (null, batchFor(parallelismThreshold), 0, 0, table,
3864 null, reducer).invoke();
3865 }
3866
3867 /**
3868 * Returns the result of accumulating the given transformation
3869 * of all entries using the given reducer to combine values,
3870 * or null if none.
3871 *
3872 * @param parallelismThreshold the (estimated) number of elements
3873 * needed for this operation to be executed in parallel
3874 * @param transformer a function returning the transformation
3875 * for an element, or null if there is no transformation (in
3876 * which case it is not combined)
3877 * @param reducer a commutative associative combining function
3878 * @return the result of accumulating the given transformation
3879 * of all entries
3880 */
3881 public <U> U reduceEntries(long parallelismThreshold,
3882 Function<Map.Entry<K,V>, ? extends U> transformer,
3883 BiFunction<? super U, ? super U, ? extends U> reducer) {
3884 if (transformer == null || reducer == null)
3885 throw new NullPointerException();
3886 return new MapReduceEntriesTask<K,V,U>
3887 (null, batchFor(parallelismThreshold), 0, 0, table,
3888 null, transformer, reducer).invoke();
3889 }
3890
3891 /**
3892 * Returns the result of accumulating the given transformation
3893 * of all entries using the given reducer to combine values,
3894 * and the given basis as an identity value.
3895 *
3896 * @param parallelismThreshold the (estimated) number of elements
3897 * needed for this operation to be executed in parallel
3898 * @param transformer a function returning the transformation
3899 * for an element
3900 * @param basis the identity (initial default value) for the reduction
3901 * @param reducer a commutative associative combining function
3902 * @return the result of accumulating the given transformation
3903 * of all entries
3904 */
3905 public double reduceEntriesToDouble(long parallelismThreshold,
3906 ToDoubleFunction<Map.Entry<K,V>> transformer,
3907 double basis,
3908 DoubleBinaryOperator reducer) {
3909 if (transformer == null || reducer == null)
3910 throw new NullPointerException();
3911 return new MapReduceEntriesToDoubleTask<K,V>
3912 (null, batchFor(parallelismThreshold), 0, 0, table,
3913 null, transformer, basis, reducer).invoke();
3914 }
3915
3916 /**
3917 * Returns the result of accumulating the given transformation
3918 * of all entries using the given reducer to combine values,
3919 * and the given basis as an identity value.
3920 *
3921 * @param parallelismThreshold the (estimated) number of elements
3922 * needed for this operation to be executed in parallel
3923 * @param transformer a function returning the transformation
3924 * for an element
3925 * @param basis the identity (initial default value) for the reduction
3926 * @param reducer a commutative associative combining function
3927 * @return the result of accumulating the given transformation
3928 * of all entries
3929 */
3930 public long reduceEntriesToLong(long parallelismThreshold,
3931 ToLongFunction<Map.Entry<K,V>> transformer,
3932 long basis,
3933 LongBinaryOperator reducer) {
3934 if (transformer == null || reducer == null)
3935 throw new NullPointerException();
3936 return new MapReduceEntriesToLongTask<K,V>
3937 (null, batchFor(parallelismThreshold), 0, 0, table,
3938 null, transformer, basis, reducer).invoke();
3939 }
3940
3941 /**
3942 * Returns the result of accumulating the given transformation
3943 * of all entries using the given reducer to combine values,
3944 * and the given basis as an identity value.
3945 *
3946 * @param parallelismThreshold the (estimated) number of elements
3947 * needed for this operation to be executed in parallel
3948 * @param transformer a function returning the transformation
3949 * for an element
3950 * @param basis the identity (initial default value) for the reduction
3951 * @param reducer a commutative associative combining function
3952 * @return the result of accumulating the given transformation
3953 * of all entries
3954 */
3955 public int reduceEntriesToInt(long parallelismThreshold,
3956 ToIntFunction<Map.Entry<K,V>> transformer,
3957 int basis,
3958 IntBinaryOperator reducer) {
3959 if (transformer == null || reducer == null)
3960 throw new NullPointerException();
3961 return new MapReduceEntriesToIntTask<K,V>
3962 (null, batchFor(parallelismThreshold), 0, 0, table,
3963 null, transformer, basis, reducer).invoke();
3964 }
3965
3966
3967 /* ----------------Views -------------- */
3968
    /**
     * Base class for views. Holds the backing map and implements the
     * Collection operations that can be expressed purely in terms of
     * the map plus the abstract iterator()/contains()/remove()
     * supplied by concrete subclasses.
     */
    abstract static class CollectionView<K,V,E>
        implements Collection<E>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        // The backing map; every view operation delegates to it.
        final ConcurrentHashMap<K,V> map;
        CollectionView(ConcurrentHashMap<K,V> map) { this.map = map; }

        /**
         * Returns the map backing this view.
         *
         * @return the map backing this view
         */
        public ConcurrentHashMap<K,V> getMap() { return map; }

        /**
         * Removes all of the elements from this view, by removing all
         * the mappings from the map backing this view.
         */
        public final void clear() { map.clear(); }
        public final int size() { return map.size(); }
        public final boolean isEmpty() { return map.isEmpty(); }

        // implementations below rely on concrete classes supplying these
        // abstract methods
        /**
         * Returns a "weakly consistent" iterator that will never
         * throw {@link ConcurrentModificationException}, and
         * guarantees to traverse elements as they existed upon
         * construction of the iterator, and may (but is not
         * guaranteed to) reflect any modifications subsequent to
         * construction.
         */
        public abstract Iterator<E> iterator();
        public abstract boolean contains(Object o);
        public abstract boolean remove(Object o);

        private static final String oomeMsg = "Required array size too large";

        public final Object[] toArray() {
            // Presize from mappingCount, but since the map may grow
            // while we iterate, be prepared to expand the array
            // (by ~1.5x per step, saturating at MAX_ARRAY_SIZE).
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(oomeMsg);
            int n = (int)sz;
            Object[] r = new Object[n];
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    // Grow, guarding the n + n/2 + 1 computation against
                    // overflowing past MAX_ARRAY_SIZE.
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = e;
            }
            // Trim if the map shrank while we copied.
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        public final <T> T[] toArray(T[] a) {
            // Same growth strategy as toArray(), but reuses the caller's
            // array when it is large enough, per the Collection contract.
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(oomeMsg);
            int m = (int)sz;
            T[] r = (a.length >= m) ? a :
                (T[])java.lang.reflect.Array
                .newInstance(a.getClass().getComponentType(), m);
            int n = r.length;
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = (T)e;
            }
            if (a == r && i < n) {
                r[i] = null; // null-terminate, per Collection.toArray(T[]) spec
                return r;
            }
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        /**
         * Returns a string representation of this collection.
         * The string representation consists of the string representations
         * of the collection's elements in the order they are returned by
         * its iterator, enclosed in square brackets ({@code "[]"}).
         * Adjacent elements are separated by the characters {@code ", "}
         * (comma and space).  Elements are converted to strings as by
         * {@link String#valueOf(Object)}.
         *
         * @return a string representation of this collection
         */
        public final String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append('[');
            Iterator<E> it = iterator();
            if (it.hasNext()) {
                for (;;) {
                    Object e = it.next();
                    // Guard against self-referential views.
                    sb.append(e == this ? "(this Collection)" : e);
                    if (!it.hasNext())
                        break;
                    sb.append(',').append(' ');
                }
            }
            return sb.append(']').toString();
        }

        public final boolean containsAll(Collection<?> c) {
            if (c != this) {
                for (Object e : c) {
                    // Null elements can never be present (map is null-hostile).
                    if (e == null || !contains(e))
                        return false;
                }
            }
            return true;
        }

        public final boolean removeAll(Collection<?> c) {
            boolean modified = false;
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

        public final boolean retainAll(Collection<?> c) {
            boolean modified = false;
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (!c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

    }
4120
    /**
     * A view of a ConcurrentHashMap as a {@link Set} of keys, in
     * which additions may optionally be enabled by mapping to a
     * common value.  This class cannot be directly instantiated.
     * See {@link #keySet() keySet()},
     * {@link #keySet(Object) keySet(V)},
     * {@link #newKeySet() newKeySet()},
     * {@link #newKeySet(int) newKeySet(int)}.
     */
    public static class KeySetView<K,V> extends CollectionView<K,V,K>
        implements Set<K>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        // Value that add()/addAll() map new keys to; null disables additions.
        private final V value;
        KeySetView(ConcurrentHashMap<K,V> map, V value) {  // non-public
            super(map);
            this.value = value;
        }

        /**
         * Returns the default mapped value for additions,
         * or {@code null} if additions are not supported.
         *
         * @return the default mapped value for additions, or {@code null}
         * if not supported
         */
        public V getMappedValue() { return value; }

        /**
         * {@inheritDoc}
         * @throws NullPointerException if the specified key is null
         */
        public boolean contains(Object o) { return map.containsKey(o); }

        /**
         * Removes the key from this map view, by removing the key (and its
         * corresponding value) from the backing map.  This method does
         * nothing if the key is not in the map.
         *
         * @param  o the key to be removed from the backing map
         * @return {@code true} if the backing map contained the specified key
         * @throws NullPointerException if the specified key is null
         */
        public boolean remove(Object o) { return map.remove(o) != null; }

        /**
         * @return an iterator over the keys of the backing map
         */
        public Iterator<K> iterator() {
            // Snapshot the table reference; the iterator is weakly consistent.
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeyIterator<K,V>(t, f, 0, f, m);
        }

        /**
         * Adds the specified key to this set view by mapping the key to
         * the default mapped value in the backing map, if defined.
         *
         * @param e key to be added
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the specified key is null
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean add(K e) {
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            // putIfAbsent semantics: null return means the key was absent.
            return map.internalPut(e, v, true) == null;
        }

        /**
         * Adds all of the elements in the specified collection to this set,
         * as if by calling {@link #add} on each one.
         *
         * @param c the elements to be inserted into this set
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the collection or any of its
         * elements are {@code null}
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean addAll(Collection<? extends K> c) {
            boolean added = false;
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            for (K e : c) {
                if (map.internalPut(e, v, true) == null)
                    added = true;
            }
            return added;
        }

        // Sum of element hash codes, per the Set.hashCode() contract.
        public int hashCode() {
            int h = 0;
            for (K e : this)
                h += e.hashCode();
            return h;
        }

        // Set equality: same elements regardless of order, per Set.equals().
        public boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public Spliterator<K> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();   // estimate only; may race with updates
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Consumer<? super K> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept((K)p.key);   // cast is safe: keys are of type K
            }
        }
    }
4247
    /**
     * A view of a ConcurrentHashMap as a {@link Collection} of
     * values, in which additions are disabled. This class cannot be
     * directly instantiated. See {@link #values()}.
     */
    static final class ValuesView<K,V> extends CollectionView<K,V,V>
        implements Collection<V>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        ValuesView(ConcurrentHashMap<K,V> map) { super(map); }
        public final boolean contains(Object o) {
            return map.containsValue(o);
        }

        // Removes one occurrence of the given value via a linear,
        // weakly-consistent scan (values are not indexed).
        public final boolean remove(Object o) {
            if (o != null) {
                for (Iterator<V> it = iterator(); it.hasNext();) {
                    if (o.equals(it.next())) {
                        it.remove();
                        return true;
                    }
                }
            }
            return false;
        }

        public final Iterator<V> iterator() {
            // Snapshot the table reference; the iterator is weakly consistent.
            ConcurrentHashMap<K,V> m = map;
            Node<K,V>[] t;
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueIterator<K,V>(t, f, 0, f, m);
        }

        // A value cannot be added without a key, so additions are disabled.
        public final boolean add(V e) {
            throw new UnsupportedOperationException();
        }
        public final boolean addAll(Collection<? extends V> c) {
            throw new UnsupportedOperationException();
        }

        public Spliterator<V> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();   // estimate only; may race with updates
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Consumer<? super V> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(p.val);
            }
        }
    }
4305
    /**
     * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
     * entries.  This class cannot be directly instantiated. See
     * {@link #entrySet()}.
     */
    static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
        implements Set<Map.Entry<K,V>>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }

        // True iff o is a Map.Entry whose key maps (in the backing map)
        // to a value equal to the entry's value.  Null key/value can
        // never match since the map is null-hostile.
        public boolean contains(Object o) {
            Object k, v, r; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (r = map.get(k)) != null &&
                    (v = e.getValue()) != null &&
                    (v == r || v.equals(r)));
        }

        // Removes the mapping only if the key is currently bound to the
        // entry's exact value (two-argument remove semantics).
        public boolean remove(Object o) {
            Object k, v; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    map.remove(k, v));
        }

        /**
         * @return an iterator over the entries of the backing map
         */
        public Iterator<Map.Entry<K,V>> iterator() {
            // Snapshot the table reference; the iterator is weakly consistent.
            ConcurrentHashMap<K,V> m = map;
            Node<K,V>[] t;
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntryIterator<K,V>(t, f, 0, f, m);
        }

        // Plain put semantics: returns true if the key was previously absent.
        public boolean add(Entry<K,V> e) {
            return map.internalPut(e.getKey(), e.getValue(), false) == null;
        }

        public boolean addAll(Collection<? extends Entry<K,V>> c) {
            boolean added = false;
            for (Entry<K,V> e : c) {
                if (add(e))
                    added = true;
            }
            return added;
        }

        // Sum of entry hash codes, per the Set.hashCode() contract.
        public final int hashCode() {
            int h = 0;
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; ) {
                    h += p.hashCode();
                }
            }
            return h;
        }

        // Set equality: same elements regardless of order, per Set.equals().
        public final boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public Spliterator<Map.Entry<K,V>> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();   // estimate only; may race with updates
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
        }

        public void forEach(Consumer<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    // Wrap in MapEntry so setValue() writes through to the map.
                    action.accept(new MapEntry<K,V>((K)p.key, p.val, map));
            }
        }

    }
4394
4395 // -------------------------------------------------------
4396
    /**
     * Base class for bulk tasks. Repeats some fields and code from
     * class Traverser, because we need to subclass CountedCompleter.
     */
    abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
        Node<K,V>[] tab;        // same as Traverser
        Node<K,V> next;         // next node to visit in current bin
        int index;              // index of bin to use next
        int baseIndex;          // current index of initial table
        int baseLimit;          // one past last index of range to process
        final int baseSize;     // initial table size
        int batch;              // split control

        BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
            super(par);
            this.batch = b;
            this.index = this.baseIndex = i;
            if ((this.tab = t) == null)
                this.baseSize = this.baseLimit = 0;
            else if (par == null)
                this.baseSize = this.baseLimit = t.length;  // root: whole table
            else {
                this.baseLimit = f;               // subtask: covers [i, f)
                this.baseSize = par.baseSize;
            }
        }

        /**
         * Same as Traverser version
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;
            for (;;) {
                Node<K,V>[] t; int i, n; Object ek;
                if (e != null)
                    return next = e;
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;   // range exhausted
                if ((e = tabAt(t, index)) != null && e.hash < 0) {
                    // Negative hash marks a special node: a TreeBin (walk
                    // its linked node list) or a forwarding node (descend
                    // into the next table during a resize).
                    if ((ek = e.key) instanceof TreeBin)
                        e = ((TreeBin<K,V>)ek).first;
                    else {
                        tab = (Node<K,V>[])ek;
                        e = null;
                        continue;
                    }
                }
                // Stride by baseSize to visit rehashed upper slots before
                // moving on to the next base bin.
                if ((index += baseSize) >= n)
                    index = ++baseIndex;
            }
        }
    }
4452
4453 /*
4454 * Task classes. Coded in a regular but ugly format/style to
4455 * simplify checks that each variant differs in the right way from
4456 * others. The null screenings exist because compilers cannot tell
4457 * that we've already null-checked task arguments, so we force
4458 * simplest hoisted bypass to help avoid convoluted traps.
4459 */
4460
    /** Bulk task applying an action to every key in its range. */
    static final class ForEachKeyTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super K> action;
        ForEachKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super K> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super K> action;
            if ((action = this.action) != null) {  // null screening (see task class notes)
                // Fork the upper half of the remaining range until the
                // batch allowance is spent; the embedded assignments
                // shrink this task's batch and baseLimit as it splits.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachKeyTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process this task's (possibly reduced) range.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept((K)p.key);
                propagateCompletion();
            }
        }
    }
4486
    /** Bulk task applying an action to every value in its range. */
    static final class ForEachValueTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super V> action;
        ForEachValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super V> action;
            if ((action = this.action) != null) {  // null screening (see task class notes)
                // Fork upper halves while batch allows; embedded assignments
                // shrink this task's batch and baseLimit as it splits.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachValueTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process this task's (possibly reduced) range.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept(p.val);
                propagateCompletion();
            }
        }
    }
4512
    /** Bulk task applying an action to every entry (node) in its range. */
    static final class ForEachEntryTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super Entry<K,V>> action;
        ForEachEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super Entry<K,V>> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super Entry<K,V>> action;
            if ((action = this.action) != null) {  // null screening (see task class notes)
                // Fork upper halves while batch allows; embedded assignments
                // shrink this task's batch and baseLimit as it splits.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachEntryTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Nodes themselves serve as the Map.Entry arguments.
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p);
                propagateCompletion();
            }
        }
    }
4538
    /** Bulk task applying a (key, value) action to every mapping in its range. */
    static final class ForEachMappingTask<K,V>
        extends BulkTask<K,V,Void> {
        final BiConsumer<? super K, ? super V> action;
        ForEachMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiConsumer<? super K,? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final BiConsumer<? super K, ? super V> action;
            if ((action = this.action) != null) {  // null screening (see task class notes)
                // Fork upper halves while batch allows; embedded assignments
                // shrink this task's batch and baseLimit as it splits.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachMappingTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process this task's (possibly reduced) range.
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept((K)p.key, p.val);
                propagateCompletion();
            }
        }
    }
4564
    /** Bulk task applying an action to each non-null transformation of a key. */
    static final class ForEachTransformedKeyTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super K, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {  // null screening (see task class notes)
                // Fork upper halves while batch allows; embedded assignments
                // shrink this task's batch and baseLimit as it splits.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedKeyTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Null transformation means "skip this element".
                    if ((u = transformer.apply((K)p.key)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
4596
    /** Bulk task applying an action to each non-null transformation of a value. */
    static final class ForEachTransformedValueTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {  // null screening (see task class notes)
                // Fork upper halves while batch allows; embedded assignments
                // shrink this task's batch and baseLimit as it splits.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedValueTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Null transformation means "skip this element".
                    if ((u = transformer.apply(p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
4628
    /** Bulk task applying an action to each non-null transformation of an entry. */
    static final class ForEachTransformedEntryTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {  // null screening (see task class notes)
                // Fork upper halves while batch allows; embedded assignments
                // shrink this task's batch and baseLimit as it splits.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedEntryTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Null transformation means "skip this element".
                    if ((u = transformer.apply(p)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
4660
    /**
     * Task that applies the given (key, value) transformer to each
     * mapping in this task's index range and feeds each non-null
     * transformed result to the given action.
     */
    static final class ForEachTransformedMappingTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            // Run only if both function fields are non-null.
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Halve the remaining range while batch budget lasts:
                // fork the upper half [h, f), keep the lower half [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1); // account for the forked child
                    new ForEachTransformedMappingTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                // Process own range; stored key is cast back to K.
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply((K)p.key, p.val)) != null)
                        action.accept(u); // null results are skipped
                }
                propagateCompletion(); // signal completion up the task tree
            }
        }
    }
4693
    /**
     * Task that applies the search function to each key until it
     * returns a non-null value. The first such value (across all
     * sibling tasks) is installed in the shared {@code result}
     * reference; the whole computation then stops early.
     */
    static final class SearchKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> searchFunction;
        final AtomicReference<U> result; // shared across all subtasks
        SearchKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super K, ? extends U> searchFunction;
            final AtomicReference<U> result;
            // Run only if both fields are non-null.
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Halve the range while batch budget lasts, forking the
                // upper half; bail out if the search already succeeded.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return; // another task already found a match
                    addToPendingCount(1);
                    new SearchKeysTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan own range, stopping as soon as any task succeeds.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        // Own range exhausted without a match.
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply((K)p.key)) != null) {
                        // First non-null result wins the CAS; the winner
                        // completes the root to stop all other tasks.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
4736
    /**
     * Task that applies the search function to each value until it
     * returns a non-null value. The first such value (across all
     * sibling tasks) is installed in the shared {@code result}
     * reference; the whole computation then stops early.
     */
    static final class SearchValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result; // shared across all subtasks
        SearchValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            // Run only if both fields are non-null.
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Halve the range while batch budget lasts, forking the
                // upper half; bail out if the search already succeeded.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return; // another task already found a match
                    addToPendingCount(1);
                    new SearchValuesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan own range, stopping as soon as any task succeeds.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        // Own range exhausted without a match.
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.val)) != null) {
                        // First non-null result wins the CAS; the winner
                        // completes the root to stop all other tasks.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
4779
    /**
     * Task that applies the search function to each entry (each Node
     * is passed directly as the Entry argument) until it returns a
     * non-null value. The first such value is installed in the shared
     * {@code result} reference; the computation then stops early.
     */
    static final class SearchEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result; // shared across all subtasks
        SearchEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Entry<K,V>, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<Entry<K,V>, ? extends U> searchFunction;
            final AtomicReference<U> result;
            // Run only if both fields are non-null.
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Halve the range while batch budget lasts, forking the
                // upper half; bail out if the search already succeeded.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return; // another task already found a match
                    addToPendingCount(1);
                    new SearchEntriesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan own range, stopping as soon as any task succeeds.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        // Own range exhausted without a match.
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p)) != null) {
                        // First non-null result wins the CAS; the winner
                        // completes the root. (return here is equivalent
                        // to the break used in sibling search tasks.)
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        return;
                    }
                }
            }
        }
    }
4822
    /**
     * Task that applies the (key, value) search function to each
     * mapping until it returns a non-null value. The first such value
     * (across all sibling tasks) is installed in the shared
     * {@code result} reference; the computation then stops early.
     */
    static final class SearchMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result; // shared across all subtasks
        SearchMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            // Run only if both fields are non-null.
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Halve the range while batch budget lasts, forking the
                // upper half; bail out if the search already succeeded.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return; // another task already found a match
                    addToPendingCount(1);
                    new SearchMappingsTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan own range, stopping as soon as any task succeeds.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        // Own range exhausted without a match.
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply((K)p.key, p.val)) != null) {
                        // First non-null result wins the CAS; the winner
                        // completes the root to stop all other tasks.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
4865
    /**
     * Task that reduces all keys in this task's range using the given
     * reducer, combining with completed right-hand subtasks as this
     * task and its ancestors complete. Result is null if there are no
     * elements.
     */
    static final class ReduceKeysTask<K,V>
        extends BulkTask<K,V,K> {
        final BiFunction<? super K, ? super K, ? extends K> reducer;
        K result;                            // accumulated result; null if empty
        ReduceKeysTask<K,V> rights, nextRight; // list of forked right-hand subtasks
        ReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceKeysTask<K,V> nextRight,
             BiFunction<? super K, ? super K, ? extends K> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final K getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super K, ? extends K> reducer;
            if ((reducer = this.reducer) != null) {
                // Halve the remaining range while batch budget lasts,
                // forking the upper half and linking it onto rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceKeysTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Leaf fold over own range; null elements are skipped.
                K r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    K u = (K)p.key;
                    r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
                }
                result = r;
                // At each completion point, fold in the results of the
                // finished right-hand subtasks (linked via rights/nextRight).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    ReduceKeysTask<K,V>
                        t = (ReduceKeysTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        K tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
4911
    /**
     * Task that reduces all values in this task's range using the
     * given reducer, combining with completed right-hand subtasks as
     * this task and its ancestors complete. Result is null if there
     * are no elements.
     */
    static final class ReduceValuesTask<K,V>
        extends BulkTask<K,V,V> {
        final BiFunction<? super V, ? super V, ? extends V> reducer;
        V result;                              // accumulated result; null if empty
        ReduceValuesTask<K,V> rights, nextRight; // list of forked right-hand subtasks
        ReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceValuesTask<K,V> nextRight,
             BiFunction<? super V, ? super V, ? extends V> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final V getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super V, ? super V, ? extends V> reducer;
            if ((reducer = this.reducer) != null) {
                // Halve the remaining range while batch budget lasts,
                // forking the upper half and linking it onto rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceValuesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Leaf fold over own range.
                V r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    V v = p.val;
                    r = (r == null) ? v : reducer.apply(r, v);
                }
                result = r;
                // At each completion point, fold in the results of the
                // finished right-hand subtasks (linked via rights/nextRight).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    ReduceValuesTask<K,V>
                        t = (ReduceValuesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        V tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
4957
4958 static final class ReduceEntriesTask<K,V>
4959 extends BulkTask<K,V,Map.Entry<K,V>> {
4960 final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
4961 Map.Entry<K,V> result;
4962 ReduceEntriesTask<K,V> rights, nextRight;
4963 ReduceEntriesTask
4964 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4965 ReduceEntriesTask<K,V> nextRight,
4966 BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4967 super(p, b, i, f, t); this.nextRight = nextRight;
4968 this.reducer = reducer;
4969 }
4970 public final Map.Entry<K,V> getRawResult() { return result; }
4971 public final void compute() {
4972 final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
4973 if ((reducer = this.reducer) != null) {
4974 for (int i = baseIndex, f, h; batch > 0 &&
4975 (h = ((f = baseLimit) + i) >>> 1) > i;) {
4976 addToPendingCount(1);
4977 (rights = new ReduceEntriesTask<K,V>
4978 (this, batch >>>= 1, baseLimit = h, f, tab,
4979 rights, reducer)).fork();
4980 }
4981 Map.Entry<K,V> r = null;
4982 for (Node<K,V> p; (p = advance()) != null; )
4983 r = (r == null) ? p : reducer.apply(r, p);
4984 result = r;
4985 CountedCompleter<?> c;
4986 for (c = firstComplete(); c != null; c = c.nextComplete()) {
4987 ReduceEntriesTask<K,V>
4988 t = (ReduceEntriesTask<K,V>)c,
4989 s = t.rights;
4990 while (s != null) {
4991 Map.Entry<K,V> tr, sr;
4992 if ((sr = s.result) != null)
4993 t.result = (((tr = t.result) == null) ? sr :
4994 reducer.apply(tr, sr));
4995 s = t.rights = s.nextRight;
4996 }
4997 }
4998 }
4999 }
5000 }
5001
    /**
     * Task that maps each key through the given transformer and
     * reduces the non-null transformed results using the given
     * reducer, combining with completed right-hand subtasks on
     * completion. Result is null if there are no non-null mappings.
     */
    static final class MapReduceKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;                               // accumulated result; null if empty
        MapReduceKeysTask<K,V,U> rights, nextRight; // list of forked right-hand subtasks
        MapReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysTask<K,V,U> nextRight,
             Function<? super K, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            // Run only if both function fields are non-null.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Halve the remaining range while batch budget lasts,
                // forking the upper half and linking it onto rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Leaf map+fold over own range; null transformed results
                // are skipped.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply((K)p.key)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // At each completion point, fold in the results of the
                // finished right-hand subtasks (linked via rights/nextRight).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceKeysTask<K,V,U>
                        t = (MapReduceKeysTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5053
    /**
     * Task that maps each value through the given transformer and
     * reduces the non-null transformed results using the given
     * reducer, combining with completed right-hand subtasks on
     * completion. Result is null if there are no non-null mappings.
     */
    static final class MapReduceValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;                                 // accumulated result; null if empty
        MapReduceValuesTask<K,V,U> rights, nextRight; // list of forked right-hand subtasks
        MapReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesTask<K,V,U> nextRight,
             Function<? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            // Run only if both function fields are non-null.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Halve the remaining range while batch budget lasts,
                // forking the upper half and linking it onto rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Leaf map+fold over own range; null transformed results
                // are skipped.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // At each completion point, fold in the results of the
                // finished right-hand subtasks (linked via rights/nextRight).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceValuesTask<K,V,U>
                        t = (MapReduceValuesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5105
    /**
     * Task that maps each entry (Nodes are passed directly as the
     * Map.Entry argument) through the given transformer and reduces
     * the non-null transformed results using the given reducer,
     * combining with completed right-hand subtasks on completion.
     * Result is null if there are no non-null mappings.
     */
    static final class MapReduceEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;                                  // accumulated result; null if empty
        MapReduceEntriesTask<K,V,U> rights, nextRight; // list of forked right-hand subtasks
        MapReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesTask<K,V,U> nextRight,
             Function<Map.Entry<K,V>, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            // Run only if both function fields are non-null.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Halve the remaining range while batch budget lasts,
                // forking the upper half and linking it onto rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Leaf map+fold over own range; null transformed results
                // are skipped.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // At each completion point, fold in the results of the
                // finished right-hand subtasks (linked via rights/nextRight).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceEntriesTask<K,V,U>
                        t = (MapReduceEntriesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5157
    /**
     * Task that maps each (key, value) pair through the given
     * transformer and reduces the non-null transformed results using
     * the given reducer, combining with completed right-hand subtasks
     * on completion. Result is null if there are no non-null mappings.
     */
    static final class MapReduceMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;                                   // accumulated result; null if empty
        MapReduceMappingsTask<K,V,U> rights, nextRight; // list of forked right-hand subtasks
        MapReduceMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsTask<K,V,U> nextRight,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            // Run only if both function fields are non-null.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Halve the remaining range while batch budget lasts,
                // forking the upper half and linking it onto rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Leaf map+fold over own range; stored key is cast back
                // to K; null transformed results are skipped.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply((K)p.key, p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // At each completion point, fold in the results of the
                // finished right-hand subtasks (linked via rights/nextRight).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceMappingsTask<K,V,U>
                        t = (MapReduceMappingsTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5209
    /**
     * Task that maps each key to a double and reduces with the given
     * operator, starting from {@code basis}. Each forked subtask is
     * seeded with the same basis (r equals basis at fork time);
     * sibling results are folded in unconditionally on completion.
     */
    static final class MapReduceKeysToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<? super K> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;  // identity/starting value for the reduction
        double result;
        MapReduceKeysToDoubleTask<K,V> rights, nextRight; // forked right-hand subtasks
        MapReduceKeysToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToDoubleTask<K,V> nextRight,
             ToDoubleFunction<? super K> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<? super K> transformer;
            final DoubleBinaryOperator reducer;
            // Run only if both function fields are non-null.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Halve the remaining range while batch budget lasts,
                // forking the upper half and linking it onto rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Leaf map+fold over own range; key cast back to K.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble((K)p.key));
                result = r;
                // At each completion point, fold in finished right-hand
                // subtask results (linked via rights/nextRight).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceKeysToDoubleTask<K,V>
                        t = (MapReduceKeysToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5257
    /**
     * Task that maps each value to a double and reduces with the
     * given operator, starting from {@code basis}. Each forked
     * subtask is seeded with the same basis (r equals basis at fork
     * time); sibling results are folded in on completion.
     */
    static final class MapReduceValuesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<? super V> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;  // identity/starting value for the reduction
        double result;
        MapReduceValuesToDoubleTask<K,V> rights, nextRight; // forked right-hand subtasks
        MapReduceValuesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToDoubleTask<K,V> nextRight,
             ToDoubleFunction<? super V> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<? super V> transformer;
            final DoubleBinaryOperator reducer;
            // Run only if both function fields are non-null.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Halve the remaining range while batch budget lasts,
                // forking the upper half and linking it onto rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Leaf map+fold over own range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val));
                result = r;
                // At each completion point, fold in finished right-hand
                // subtask results (linked via rights/nextRight).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceValuesToDoubleTask<K,V>
                        t = (MapReduceValuesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5305
    /**
     * Task that maps each entry (Nodes passed directly as the
     * Map.Entry argument) to a double and reduces with the given
     * operator, starting from {@code basis}. Each forked subtask is
     * seeded with the same basis; sibling results are folded in on
     * completion.
     */
    static final class MapReduceEntriesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<Map.Entry<K,V>> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;  // identity/starting value for the reduction
        double result;
        MapReduceEntriesToDoubleTask<K,V> rights, nextRight; // forked right-hand subtasks
        MapReduceEntriesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToDoubleTask<K,V> nextRight,
             ToDoubleFunction<Map.Entry<K,V>> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<Map.Entry<K,V>> transformer;
            final DoubleBinaryOperator reducer;
            // Run only if both function fields are non-null.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Halve the remaining range while batch budget lasts,
                // forking the upper half and linking it onto rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Leaf map+fold over own range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p));
                result = r;
                // At each completion point, fold in finished right-hand
                // subtask results (linked via rights/nextRight).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceEntriesToDoubleTask<K,V>
                        t = (MapReduceEntriesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5353
    /**
     * Task that maps each (key, value) pair to a double and reduces
     * with the given operator, starting from {@code basis}. Each
     * forked subtask is seeded with the same basis; sibling results
     * are folded in on completion.
     */
    static final class MapReduceMappingsToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleBiFunction<? super K, ? super V> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;  // identity/starting value for the reduction
        double result;
        MapReduceMappingsToDoubleTask<K,V> rights, nextRight; // forked right-hand subtasks
        MapReduceMappingsToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToDoubleTask<K,V> nextRight,
             ToDoubleBiFunction<? super K, ? super V> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleBiFunction<? super K, ? super V> transformer;
            final DoubleBinaryOperator reducer;
            // Run only if both function fields are non-null.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Halve the remaining range while batch budget lasts,
                // forking the upper half and linking it onto rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Leaf map+fold over own range; key cast back to K.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble((K)p.key, p.val));
                result = r;
                // At each completion point, fold in finished right-hand
                // subtask results (linked via rights/nextRight).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceMappingsToDoubleTask<K,V>
                        t = (MapReduceMappingsToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5401
    /**
     * Task that maps each key to a long and reduces with the given
     * operator, starting from {@code basis}. Each forked subtask is
     * seeded with the same basis; sibling results are folded in on
     * completion.
     */
    static final class MapReduceKeysToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<? super K> transformer;
        final LongBinaryOperator reducer;
        final long basis;  // identity/starting value for the reduction
        long result;
        MapReduceKeysToLongTask<K,V> rights, nextRight; // forked right-hand subtasks
        MapReduceKeysToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToLongTask<K,V> nextRight,
             ToLongFunction<? super K> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super K> transformer;
            final LongBinaryOperator reducer;
            // Run only if both function fields are non-null.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Halve the remaining range while batch budget lasts,
                // forking the upper half and linking it onto rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Leaf map+fold over own range; key cast back to K.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong((K)p.key));
                result = r;
                // At each completion point, fold in finished right-hand
                // subtask results (linked via rights/nextRight).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceKeysToLongTask<K,V>
                        t = (MapReduceKeysToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5449
    /**
     * Task that maps each value to a long and reduces with the given
     * operator, starting from {@code basis}. Each forked subtask is
     * seeded with the same basis; sibling results are folded in on
     * completion.
     */
    static final class MapReduceValuesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<? super V> transformer;
        final LongBinaryOperator reducer;
        final long basis;  // identity/starting value for the reduction
        long result;
        MapReduceValuesToLongTask<K,V> rights, nextRight; // forked right-hand subtasks
        MapReduceValuesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToLongTask<K,V> nextRight,
             ToLongFunction<? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super V> transformer;
            final LongBinaryOperator reducer;
            // Run only if both function fields are non-null.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Halve the remaining range while batch budget lasts,
                // forking the upper half and linking it onto rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Leaf map+fold over own range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
                result = r;
                // At each completion point, fold in finished right-hand
                // subtask results (linked via rights/nextRight).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceValuesToLongTask<K,V>
                        t = (MapReduceValuesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5497
    /**
     * Task that maps each entry (Nodes passed directly as the
     * Map.Entry argument) to a long and reduces with the given
     * operator, starting from {@code basis}. Each forked subtask is
     * seeded with the same basis; sibling results are folded in on
     * completion.
     */
    static final class MapReduceEntriesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<Map.Entry<K,V>> transformer;
        final LongBinaryOperator reducer;
        final long basis;  // identity/starting value for the reduction
        long result;
        MapReduceEntriesToLongTask<K,V> rights, nextRight; // forked right-hand subtasks
        MapReduceEntriesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToLongTask<K,V> nextRight,
             ToLongFunction<Map.Entry<K,V>> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<Map.Entry<K,V>> transformer;
            final LongBinaryOperator reducer;
            // Run only if both function fields are non-null.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Halve the remaining range while batch budget lasts,
                // forking the upper half and linking it onto rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Leaf map+fold over own range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p));
                result = r;
                // At each completion point, fold in finished right-hand
                // subtask results (linked via rights/nextRight).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceEntriesToLongTask<K,V>
                        t = (MapReduceEntriesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5545
    /**
     * Task that maps each (key, value) pair to a long and reduces
     * with the given operator, starting from {@code basis}. Each
     * forked subtask is seeded with the same basis; sibling results
     * are folded in on completion.
     */
    static final class MapReduceMappingsToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongBiFunction<? super K, ? super V> transformer;
        final LongBinaryOperator reducer;
        final long basis;  // identity/starting value for the reduction
        long result;
        MapReduceMappingsToLongTask<K,V> rights, nextRight; // forked right-hand subtasks
        MapReduceMappingsToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToLongTask<K,V> nextRight,
             ToLongBiFunction<? super K, ? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongBiFunction<? super K, ? super V> transformer;
            final LongBinaryOperator reducer;
            // Run only if both function fields are non-null.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Halve the remaining range while batch budget lasts,
                // forking the upper half and linking it onto rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Leaf map+fold over own range; key cast back to K.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong((K)p.key, p.val));
                result = r;
                // At each completion point, fold in finished right-hand
                // subtask results (linked via rights/nextRight).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceMappingsToLongTask<K,V>
                        t = (MapReduceMappingsToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5593
    /**
     * ForkJoin task that applies {@code transformer} to each key and folds
     * the resulting ints with {@code reducer}, starting from the identity
     * value {@code basis}.
     */
    static final class MapReduceKeysToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        // Per-key function; a null value disables the task.
        final ToIntFunction<? super K> transformer;
        // Combiner used to fold partial results together.
        final IntBinaryOperator reducer;
        // Identity value that seeds the reduction.
        final int basis;
        // Reduced result for this task's subrange; read via getRawResult.
        int result;
        // "rights" heads the list of subtasks this task forked;
        // "nextRight" links this task into its parent's rights list.
        MapReduceKeysToIntTask<K,V> rights, nextRight;
        MapReduceKeysToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToIntTask<K,V> nextRight,
             ToIntFunction<? super K> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<? super K> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Split off the right half of the remaining range as forked
                // subtasks while the batch budget lasts.  Evaluation order
                // matters: f captures the old baseLimit before it is
                // narrowed to h, and batch is halved in the argument list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce the locally retained (left) portion sequentially.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt((K)p.key));
                result = r;
                // On completion, fold in the results of forked subtasks,
                // unlinking them as they are consumed.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceKeysToIntTask<K,V>
                        t = (MapReduceKeysToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5641
    /**
     * ForkJoin task that applies {@code transformer} to each value and
     * folds the resulting ints with {@code reducer}, starting from the
     * identity value {@code basis}.
     */
    static final class MapReduceValuesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        // Per-value function; a null value disables the task.
        final ToIntFunction<? super V> transformer;
        // Combiner used to fold partial results together.
        final IntBinaryOperator reducer;
        // Identity value that seeds the reduction.
        final int basis;
        // Reduced result for this task's subrange; read via getRawResult.
        int result;
        // "rights" heads the list of subtasks this task forked;
        // "nextRight" links this task into its parent's rights list.
        MapReduceValuesToIntTask<K,V> rights, nextRight;
        MapReduceValuesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToIntTask<K,V> nextRight,
             ToIntFunction<? super V> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<? super V> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Split off the right half of the remaining range as forked
                // subtasks while the batch budget lasts.  Evaluation order
                // matters: f captures the old baseLimit before it is
                // narrowed to h, and batch is halved in the argument list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce the locally retained (left) portion sequentially.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
                result = r;
                // On completion, fold in the results of forked subtasks,
                // unlinking them as they are consumed.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceValuesToIntTask<K,V>
                        t = (MapReduceValuesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5689
    /**
     * ForkJoin task that applies {@code transformer} to each entry and
     * folds the resulting ints with {@code reducer}, starting from the
     * identity value {@code basis}.
     */
    static final class MapReduceEntriesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        // Per-entry function; a null value disables the task.
        final ToIntFunction<Map.Entry<K,V>> transformer;
        // Combiner used to fold partial results together.
        final IntBinaryOperator reducer;
        // Identity value that seeds the reduction.
        final int basis;
        // Reduced result for this task's subrange; read via getRawResult.
        int result;
        // "rights" heads the list of subtasks this task forked;
        // "nextRight" links this task into its parent's rights list.
        MapReduceEntriesToIntTask<K,V> rights, nextRight;
        MapReduceEntriesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToIntTask<K,V> nextRight,
             ToIntFunction<Map.Entry<K,V>> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<Map.Entry<K,V>> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Split off the right half of the remaining range as forked
                // subtasks while the batch budget lasts.  Evaluation order
                // matters: f captures the old baseLimit before it is
                // narrowed to h, and batch is halved in the argument list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce the locally retained (left) portion sequentially;
                // Node implements Map.Entry, so nodes feed the transformer
                // directly.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p));
                result = r;
                // On completion, fold in the results of forked subtasks,
                // unlinking them as they are consumed.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceEntriesToIntTask<K,V>
                        t = (MapReduceEntriesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5737
    /**
     * ForkJoin task that applies {@code transformer} to each (key, value)
     * mapping and folds the resulting ints with {@code reducer}, starting
     * from the identity value {@code basis}.
     */
    static final class MapReduceMappingsToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        // Per-mapping function; a null value disables the task.
        final ToIntBiFunction<? super K, ? super V> transformer;
        // Combiner used to fold partial results together.
        final IntBinaryOperator reducer;
        // Identity value that seeds the reduction.
        final int basis;
        // Reduced result for this task's subrange; read via getRawResult.
        int result;
        // "rights" heads the list of subtasks this task forked;
        // "nextRight" links this task into its parent's rights list.
        MapReduceMappingsToIntTask<K,V> rights, nextRight;
        MapReduceMappingsToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToIntTask<K,V> nextRight,
             ToIntBiFunction<? super K, ? super V> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntBiFunction<? super K, ? super V> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Split off the right half of the remaining range as forked
                // subtasks while the batch budget lasts.  Evaluation order
                // matters: f captures the old baseLimit before it is
                // narrowed to h, and batch is halved in the argument list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce the locally retained (left) portion sequentially.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt((K)p.key, p.val));
                result = r;
                // On completion, fold in the results of forked subtasks,
                // unlinking them as they are consumed.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceMappingsToIntTask<K,V>
                        t = (MapReduceMappingsToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5785
    // Unsafe mechanics
    // Raw field offsets used for CAS and volatile accesses on the map's
    // hot fields, resolved reflectively once at class-initialization time.
    private static final sun.misc.Unsafe U;
    private static final long SIZECTL;        // offset of sizeCtl
    private static final long TRANSFERINDEX;  // offset of transferIndex
    private static final long TRANSFERORIGIN; // offset of transferOrigin
    private static final long BASECOUNT;      // offset of baseCount
    private static final long CELLSBUSY;      // offset of cellsBusy
    private static final long CELLVALUE;      // offset of Cell.value
    private static final long ABASE;          // base offset of Node[] storage
    private static final int ASHIFT;          // log2 of Node[] element scale

    static {
        try {
            U = sun.misc.Unsafe.getUnsafe();
            Class<?> k = ConcurrentHashMap.class;
            SIZECTL = U.objectFieldOffset
                (k.getDeclaredField("sizeCtl"));
            TRANSFERINDEX = U.objectFieldOffset
                (k.getDeclaredField("transferIndex"));
            TRANSFERORIGIN = U.objectFieldOffset
                (k.getDeclaredField("transferOrigin"));
            BASECOUNT = U.objectFieldOffset
                (k.getDeclaredField("baseCount"));
            CELLSBUSY = U.objectFieldOffset
                (k.getDeclaredField("cellsBusy"));
            Class<?> ck = Cell.class;
            CELLVALUE = U.objectFieldOffset
                (ck.getDeclaredField("value"));
            Class<?> sc = Node[].class;
            ABASE = U.arrayBaseOffset(sc);
            int scale = U.arrayIndexScale(sc);
            // The element scale must be a power of two so array element
            // addresses can be computed as ABASE + (i << ASHIFT).
            if ((scale & (scale - 1)) != 0)
                throw new Error("data type scale not a power of two");
            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
        } catch (Exception e) {
            // Any reflective failure here is an unrecoverable JVM/classfile
            // configuration error, not a condition callers can handle.
            throw new Error(e);
        }
    }
5824 }