ViewVC Help
View File | Revision Log | Show Annotations | Download File | Root Listing
root/jsr166/jsr166/src/jsr166e/ConcurrentHashMapV8.java
Revision: 1.117
Committed: Sun Dec 1 13:39:02 2013 UTC (10 years, 5 months ago) by dl
Branch: MAIN
Changes since 1.116: +80 -38 lines
Log Message:
avoid overlapping resize generations; fix RW mask

File Contents

# Content
1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/publicdomain/zero/1.0/
5 */
6
7 package jsr166e;
8
9 import jsr166e.ForkJoinPool;
10
11 import java.io.ObjectStreamField;
12 import java.io.Serializable;
13 import java.lang.reflect.ParameterizedType;
14 import java.lang.reflect.Type;
15 import java.util.AbstractMap;
16 import java.util.Arrays;
17 import java.util.Collection;
18 import java.util.Comparator;
19 import java.util.ConcurrentModificationException;
20 import java.util.Enumeration;
21 import java.util.HashMap;
22 import java.util.Hashtable;
23 import java.util.Iterator;
24 import java.util.Map;
25 import java.util.NoSuchElementException;
26 import java.util.Set;
27 import java.util.concurrent.ConcurrentMap;
28 import java.util.concurrent.atomic.AtomicReference;
29 import java.util.concurrent.atomic.AtomicInteger;
30 import java.util.concurrent.locks.LockSupport;
31 import java.util.concurrent.locks.ReentrantLock;
32
33 /**
34 * A hash table supporting full concurrency of retrievals and
35 * high expected concurrency for updates. This class obeys the
36 * same functional specification as {@link java.util.Hashtable}, and
37 * includes versions of methods corresponding to each method of
38 * {@code Hashtable}. However, even though all operations are
39 * thread-safe, retrieval operations do <em>not</em> entail locking,
40 * and there is <em>not</em> any support for locking the entire table
41 * in a way that prevents all access. This class is fully
42 * interoperable with {@code Hashtable} in programs that rely on its
43 * thread safety but not on its synchronization details.
44 *
45 * <p>Retrieval operations (including {@code get}) generally do not
46 * block, so may overlap with update operations (including {@code put}
47 * and {@code remove}). Retrievals reflect the results of the most
48 * recently <em>completed</em> update operations holding upon their
49 * onset. (More formally, an update operation for a given key bears a
50 * <em>happens-before</em> relation with any (non-null) retrieval for
51 * that key reporting the updated value.) For aggregate operations
52 * such as {@code putAll} and {@code clear}, concurrent retrievals may
53 * reflect insertion or removal of only some entries. Similarly,
54 * Iterators and Enumerations return elements reflecting the state of
55 * the hash table at some point at or since the creation of the
56 * iterator/enumeration. They do <em>not</em> throw {@link
57 * ConcurrentModificationException}. However, iterators are designed
58 * to be used by only one thread at a time. Bear in mind that the
59 * results of aggregate status methods including {@code size}, {@code
60 * isEmpty}, and {@code containsValue} are typically useful only when
61 * a map is not undergoing concurrent updates in other threads.
62 * Otherwise the results of these methods reflect transient states
63 * that may be adequate for monitoring or estimation purposes, but not
64 * for program control.
65 *
66 * <p>The table is dynamically expanded when there are too many
67 * collisions (i.e., keys that have distinct hash codes but fall into
68 * the same slot modulo the table size), with the expected average
69 * effect of maintaining roughly two bins per mapping (corresponding
70 * to a 0.75 load factor threshold for resizing). There may be much
71 * variance around this average as mappings are added and removed, but
72 * overall, this maintains a commonly accepted time/space tradeoff for
73 * hash tables. However, resizing this or any other kind of hash
74 * table may be a relatively slow operation. When possible, it is a
75 * good idea to provide a size estimate as an optional {@code
76 * initialCapacity} constructor argument. An additional optional
77 * {@code loadFactor} constructor argument provides a further means of
78 * customizing initial table capacity by specifying the table density
79 * to be used in calculating the amount of space to allocate for the
80 * given number of elements. Also, for compatibility with previous
81 * versions of this class, constructors may optionally specify an
82 * expected {@code concurrencyLevel} as an additional hint for
83 * internal sizing. Note that using many keys with exactly the same
84 * {@code hashCode()} is a sure way to slow down performance of any
85 * hash table. To ameliorate impact, when keys are {@link Comparable},
86 * this class may use comparison order among keys to help break ties.
87 *
88 * <p>A {@link Set} projection of a ConcurrentHashMapV8 may be created
89 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
90 * (using {@link #keySet(Object)}) when only keys are of interest, and the
91 * mapped values are (perhaps transiently) not used or all take the
92 * same mapping value.
93 *
94 * <p>This class and its views and iterators implement all of the
95 * <em>optional</em> methods of the {@link Map} and {@link Iterator}
96 * interfaces.
97 *
98 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
99 * does <em>not</em> allow {@code null} to be used as a key or value.
100 *
101 * <p>ConcurrentHashMapV8s support a set of sequential and parallel bulk
102 * operations that are designed
103 * to be safely, and often sensibly, applied even with maps that are
104 * being concurrently updated by other threads; for example, when
105 * computing a snapshot summary of the values in a shared registry.
106 * There are three kinds of operation, each with four forms, accepting
107 * functions with Keys, Values, Entries, and (Key, Value) arguments
108 * and/or return values. Because the elements of a ConcurrentHashMapV8
109 * are not ordered in any particular way, and may be processed in
110 * different orders in different parallel executions, the correctness
111 * of supplied functions should not depend on any ordering, or on any
112 * other objects or values that may transiently change while
113 * computation is in progress; and except for forEach actions, should
114 * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry}
115 * objects do not support method {@code setValue}.
116 *
117 * <ul>
118 * <li> forEach: Perform a given action on each element.
119 * A variant form applies a given transformation on each element
120 * before performing the action.</li>
121 *
122 * <li> search: Return the first available non-null result of
123 * applying a given function on each element; skipping further
124 * search when a result is found.</li>
125 *
126 * <li> reduce: Accumulate each element. The supplied reduction
127 * function cannot rely on ordering (more formally, it should be
128 * both associative and commutative). There are five variants:
129 *
130 * <ul>
131 *
132 * <li> Plain reductions. (There is not a form of this method for
133 * (key, value) function arguments since there is no corresponding
134 * return type.)</li>
135 *
136 * <li> Mapped reductions that accumulate the results of a given
137 * function applied to each element.</li>
138 *
139 * <li> Reductions to scalar doubles, longs, and ints, using a
140 * given basis value.</li>
141 *
142 * </ul>
143 * </li>
144 * </ul>
145 *
146 * <p>These bulk operations accept a {@code parallelismThreshold}
147 * argument. Methods proceed sequentially if the current map size is
148 * estimated to be less than the given threshold. Using a value of
149 * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value
150 * of {@code 1} results in maximal parallelism by partitioning into
151 * enough subtasks to fully utilize the {@link
152 * ForkJoinPool#commonPool()} that is used for all parallel
153 * computations. Normally, you would initially choose one of these
154 * extreme values, and then measure performance of using in-between
155 * values that trade off overhead versus throughput.
156 *
157 * <p>The concurrency properties of bulk operations follow
158 * from those of ConcurrentHashMapV8: Any non-null result returned
159 * from {@code get(key)} and related access methods bears a
160 * happens-before relation with the associated insertion or
161 * update. The result of any bulk operation reflects the
162 * composition of these per-element relations (but is not
163 * necessarily atomic with respect to the map as a whole unless it
164 * is somehow known to be quiescent). Conversely, because keys
165 * and values in the map are never null, null serves as a reliable
166 * atomic indicator of the current lack of any result. To
167 * maintain this property, null serves as an implicit basis for
168 * all non-scalar reduction operations. For the double, long, and
169 * int versions, the basis should be one that, when combined with
170 * any other value, returns that other value (more formally, it
171 * should be the identity element for the reduction). Most common
172 * reductions have these properties; for example, computing a sum
173 * with basis 0 or a minimum with basis MAX_VALUE.
174 *
175 * <p>Search and transformation functions provided as arguments
176 * should similarly return null to indicate the lack of any result
177 * (in which case it is not used). In the case of mapped
178 * reductions, this also enables transformations to serve as
179 * filters, returning null (or, in the case of primitive
180 * specializations, the identity basis) if the element should not
181 * be combined. You can create compound transformations and
182 * filterings by composing them yourself under this "null means
183 * there is nothing there now" rule before using them in search or
184 * reduce operations.
185 *
186 * <p>Methods accepting and/or returning Entry arguments maintain
187 * key-value associations. They may be useful for example when
188 * finding the key for the greatest value. Note that "plain" Entry
189 * arguments can be supplied using {@code new
190 * AbstractMap.SimpleEntry(k,v)}.
191 *
192 * <p>Bulk operations may complete abruptly, throwing an
193 * exception encountered in the application of a supplied
194 * function. Bear in mind when handling such exceptions that other
195 * concurrently executing functions could also have thrown
196 * exceptions, or would have done so if the first exception had
197 * not occurred.
198 *
199 * <p>Speedups for parallel compared to sequential forms are common
200 * but not guaranteed. Parallel operations involving brief functions
201 * on small maps may execute more slowly than sequential forms if the
202 * underlying work to parallelize the computation is more expensive
203 * than the computation itself. Similarly, parallelization may not
204 * lead to much actual parallelism if all processors are busy
205 * performing unrelated tasks.
206 *
207 * <p>All arguments to all task methods must be non-null.
208 *
209 * <p><em>jsr166e note: During transition, this class
210 * uses nested functional interfaces with different names but the
211 * same forms as those expected for JDK8.</em>
212 *
213 * <p>This class is a member of the
214 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
215 * Java Collections Framework</a>.
216 *
217 * @since 1.5
218 * @author Doug Lea
219 * @param <K> the type of keys maintained by this map
220 * @param <V> the type of mapped values
221 */
222 public class ConcurrentHashMapV8<K,V> extends AbstractMap<K,V>
223 implements ConcurrentMap<K,V>, Serializable {
224 private static final long serialVersionUID = 7249069246763182397L;
225
/**
 * An object for traversing and partitioning elements of a source.
 * This interface provides a subset of the functionality of JDK8
 * java.util.Spliterator (defined here because this class targets
 * pre-JDK8 platforms).
 */
public static interface ConcurrentHashMapSpliterator<T> {
    /**
     * If possible, returns a new spliterator covering
     * approximately one half of the elements, which will not be
     * covered by this spliterator. Returns null if cannot be
     * split.
     *
     * @return a spliterator over the split-off elements, or null
     */
    ConcurrentHashMapSpliterator<T> trySplit();
    /**
     * Returns an estimate of the number of elements covered by
     * this Spliterator.
     *
     * @return the estimated number of remaining elements
     */
    long estimateSize();

    /** Applies the action to each untraversed element */
    void forEachRemaining(Action<? super T> action);
    /** If an element remains, applies the action and returns true. */
    boolean tryAdvance(Action<? super T> action);
}
250
251 // Sams
252 /** Interface describing a void action of one argument */
253 public interface Action<A> { void apply(A a); }
254 /** Interface describing a void action of two arguments */
255 public interface BiAction<A,B> { void apply(A a, B b); }
256 /** Interface describing a function of one argument */
257 public interface Fun<A,T> { T apply(A a); }
258 /** Interface describing a function of two arguments */
259 public interface BiFun<A,B,T> { T apply(A a, B b); }
260 /** Interface describing a function mapping its argument to a double */
261 public interface ObjectToDouble<A> { double apply(A a); }
262 /** Interface describing a function mapping its argument to a long */
263 public interface ObjectToLong<A> { long apply(A a); }
264 /** Interface describing a function mapping its argument to an int */
265 public interface ObjectToInt<A> {int apply(A a); }
266 /** Interface describing a function mapping two arguments to a double */
267 public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
268 /** Interface describing a function mapping two arguments to a long */
269 public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
270 /** Interface describing a function mapping two arguments to an int */
271 public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); }
272 /** Interface describing a function mapping two doubles to a double */
273 public interface DoubleByDoubleToDouble { double apply(double a, double b); }
274 /** Interface describing a function mapping two longs to a long */
275 public interface LongByLongToLong { long apply(long a, long b); }
276 /** Interface describing a function mapping two ints to an int */
277 public interface IntByIntToInt { int apply(int a, int b); }
278
279
280 /*
281 * Overview:
282 *
283 * The primary design goal of this hash table is to maintain
284 * concurrent readability (typically method get(), but also
285 * iterators and related methods) while minimizing update
286 * contention. Secondary goals are to keep space consumption about
287 * the same or better than java.util.HashMap, and to support high
288 * initial insertion rates on an empty table by many threads.
289 *
290 * This map usually acts as a binned (bucketed) hash table. Each
291 * key-value mapping is held in a Node. Most nodes are instances
292 * of the basic Node class with hash, key, value, and next
293 * fields. However, various subclasses exist: TreeNodes are
294 * arranged in balanced trees, not lists. TreeBins hold the roots
295 * of sets of TreeNodes. ForwardingNodes are placed at the heads
296 * of bins during resizing. ReservationNodes are used as
297 * placeholders while establishing values in computeIfAbsent and
298 * related methods. The types TreeBin, ForwardingNode, and
299 * ReservationNode do not hold normal user keys, values, or
300 * hashes, and are readily distinguishable during search etc
301 * because they have negative hash fields and null key and value
302 * fields. (These special nodes are either uncommon or transient,
303 * so the impact of carrying around some unused fields is
304 * insignificant.)
305 *
306 * The table is lazily initialized to a power-of-two size upon the
307 * first insertion. Each bin in the table normally contains a
308 * list of Nodes (most often, the list has only zero or one Node).
309 * Table accesses require volatile/atomic reads, writes, and
310 * CASes. Because there is no other way to arrange this without
311 * adding further indirections, we use intrinsics
312 * (sun.misc.Unsafe) operations.
313 *
314 * We use the top (sign) bit of Node hash fields for control
315 * purposes -- it is available anyway because of addressing
316 * constraints. Nodes with negative hash fields are specially
317 * handled or ignored in map methods.
318 *
319 * Insertion (via put or its variants) of the first node in an
320 * empty bin is performed by just CASing it to the bin. This is
321 * by far the most common case for put operations under most
322 * key/hash distributions. Other update operations (insert,
323 * delete, and replace) require locks. We do not want to waste
324 * the space required to associate a distinct lock object with
325 * each bin, so instead use the first node of a bin list itself as
326 * a lock. Locking support for these locks relies on builtin
327 * "synchronized" monitors.
328 *
329 * Using the first node of a list as a lock does not by itself
330 * suffice though: When a node is locked, any update must first
331 * validate that it is still the first node after locking it, and
332 * retry if not. Because new nodes are always appended to lists,
333 * once a node is first in a bin, it remains first until deleted
334 * or the bin becomes invalidated (upon resizing).
335 *
336 * The main disadvantage of per-bin locks is that other update
337 * operations on other nodes in a bin list protected by the same
338 * lock can stall, for example when user equals() or mapping
339 * functions take a long time. However, statistically, under
340 * random hash codes, this is not a common problem. Ideally, the
341 * frequency of nodes in bins follows a Poisson distribution
342 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
343 * parameter of about 0.5 on average, given the resizing threshold
344 * of 0.75, although with a large variance because of resizing
345 * granularity. Ignoring variance, the expected occurrences of
346 * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
347 * first values are:
348 *
349 * 0: 0.60653066
350 * 1: 0.30326533
351 * 2: 0.07581633
352 * 3: 0.01263606
353 * 4: 0.00157952
354 * 5: 0.00015795
355 * 6: 0.00001316
356 * 7: 0.00000094
357 * 8: 0.00000006
358 * more: less than 1 in ten million
359 *
360 * Lock contention probability for two threads accessing distinct
361 * elements is roughly 1 / (8 * #elements) under random hashes.
362 *
363 * Actual hash code distributions encountered in practice
364 * sometimes deviate significantly from uniform randomness. This
365 * includes the case when N > (1<<30), so some keys MUST collide.
366 * Similarly for dumb or hostile usages in which multiple keys are
367 * designed to have identical hash codes or ones that differ only
368 * in masked-out high bits. So we use a secondary strategy that
369 * applies when the number of nodes in a bin exceeds a
370 * threshold. These TreeBins use a balanced tree to hold nodes (a
371 * specialized form of red-black trees), bounding search time to
372 * O(log N). Each search step in a TreeBin is at least twice as
373 * slow as in a regular list, but given that N cannot exceed
374 * (1<<64) (before running out of addresses) this bounds search
375 * steps, lock hold times, etc, to reasonable constants (roughly
376 * 100 nodes inspected per operation worst case) so long as keys
377 * are Comparable (which is very common -- String, Long, etc).
378 * TreeBin nodes (TreeNodes) also maintain the same "next"
379 * traversal pointers as regular nodes, so can be traversed in
380 * iterators in the same way.
381 *
382 * The table is resized when occupancy exceeds a percentage
383 * threshold (nominally, 0.75, but see below). Any thread
384 * noticing an overfull bin may assist in resizing after the
385 * initiating thread allocates and sets up the replacement array.
386 * However, rather than stalling, these other threads may proceed
387 * with insertions etc. The use of TreeBins shields us from the
388 * worst case effects of overfilling while resizes are in
389 * progress. Resizing proceeds by transferring bins, one by one,
390 * from the table to the next table. However, threads claim small
391 * blocks of indices to transfer (via field transferIndex) before
392 * doing so, reducing contention. A generation stamp in field
393 * sizeCtl ensures that resizings do not overlap. Because we are
394 * using power-of-two expansion, the elements from each bin must
395 * either stay at same index, or move with a power of two
396 * offset. We eliminate unnecessary node creation by catching
397 * cases where old nodes can be reused because their next fields
398 * won't change. On average, only about one-sixth of them need
399 * cloning when a table doubles. The nodes they replace will be
400 * garbage collectable as soon as they are no longer referenced by
401 * any reader thread that may be in the midst of concurrently
402 * traversing table. Upon transfer, the old table bin contains
403 * only a special forwarding node (with hash field "MOVED") that
404 * contains the next table as its key. On encountering a
405 * forwarding node, access and update operations restart, using
406 * the new table.
407 *
408 * Each bin transfer requires its bin lock, which can stall
409 * waiting for locks while resizing. However, because other
410 * threads can join in and help resize rather than contend for
411 * locks, average aggregate waits become shorter as resizing
412 * progresses. The transfer operation must also ensure that all
413 * accessible bins in both the old and new table are usable by any
414 * traversal. This is arranged in part by proceeding from the
415 * last bin (table.length - 1) up towards the first. Upon seeing
416 * a forwarding node, traversals (see class Traverser) arrange to
417 * move to the new table without revisiting nodes. To ensure that
418 * no intervening nodes are skipped even when moved out of order,
419 * a stack (see class TableStack) is created on first encounter of
420 * a forwarding node during a traversal, to maintain its place if
421 * later processing the current table. The need for these
422 * save/restore mechanics is relatively rare, but when one
423 * forwarding node is encountered, typically many more will be.
424 * So Traversers use a simple caching scheme to avoid creating so
425 * many new TableStack nodes. (Thanks to Peter Levart for
426 * suggesting use of a stack here.)
427 *
428 * The traversal scheme also applies to partial traversals of
429 * ranges of bins (via an alternate Traverser constructor)
430 * to support partitioned aggregate operations. Also, read-only
431 * operations give up if ever forwarded to a null table, which
432 * provides support for shutdown-style clearing, which is also not
433 * currently implemented.
434 *
435 * Lazy table initialization minimizes footprint until first use,
436 * and also avoids resizings when the first operation is from a
437 * putAll, constructor with map argument, or deserialization.
438 * These cases attempt to override the initial capacity settings,
439 * but harmlessly fail to take effect in cases of races.
440 *
441 * The element count is maintained using a specialization of
442 * LongAdder. We need to incorporate a specialization rather than
443 * just use a LongAdder in order to access implicit
444 * contention-sensing that leads to creation of multiple
445 * CounterCells. The counter mechanics avoid contention on
446 * updates but can encounter cache thrashing if read too
447 * frequently during concurrent access. To avoid reading so often,
448 * resizing under contention is attempted only upon adding to a
449 * bin already holding two or more nodes. Under uniform hash
450 * distributions, the probability of this occurring at threshold
451 * is around 13%, meaning that only about 1 in 8 puts check
452 * threshold (and after resizing, many fewer do so).
453 *
454 * TreeBins use a special form of comparison for search and
455 * related operations (which is the main reason we cannot use
456 * existing collections such as TreeMaps). TreeBins contain
457 * Comparable elements, but may contain others, as well as
458 * elements that are Comparable but not necessarily Comparable for
459 * the same T, so we cannot invoke compareTo among them. To handle
460 * this, the tree is ordered primarily by hash value, then by
461 * Comparable.compareTo order if applicable. On lookup at a node,
462 * if elements are not comparable or compare as 0 then both left
463 * and right children may need to be searched in the case of tied
464 * hash values. (This corresponds to the full list search that
465 * would be necessary if all elements were non-Comparable and had
466 * tied hashes.) On insertion, to keep a total ordering (or as
467 * close as is required here) across rebalancings, we compare
468 * classes and identityHashCodes as tie-breakers. The red-black
469 * balancing code is updated from pre-jdk-collections
470 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
471 * based in turn on Cormen, Leiserson, and Rivest "Introduction to
472 * Algorithms" (CLR).
473 *
474 * TreeBins also require an additional locking mechanism. While
475 * list traversal is always possible by readers even during
476 * updates, tree traversal is not, mainly because of tree-rotations
477 * that may change the root node and/or its linkages. TreeBins
478 * include a simple read-write lock mechanism parasitic on the
479 * main bin-synchronization strategy: Structural adjustments
480 * associated with an insertion or removal are already bin-locked
481 * (and so cannot conflict with other writers) but must wait for
482 * ongoing readers to finish. Since there can be only one such
483 * waiter, we use a simple scheme using a single "waiter" field to
484 * block writers. However, readers need never block. If the root
485 * lock is held, they proceed along the slow traversal path (via
486 * next-pointers) until the lock becomes available or the list is
487 * exhausted, whichever comes first. These cases are not fast, but
488 * maximize aggregate expected throughput.
489 *
490 * Maintaining API and serialization compatibility with previous
491 * versions of this class introduces several oddities. Mainly: We
492 * leave untouched but unused constructor arguments referring to
493 * concurrencyLevel. We accept a loadFactor constructor argument,
494 * but apply it only to initial table capacity (which is the only
495 * time that we can guarantee to honor it.) We also declare an
496 * unused "Segment" class that is instantiated in minimal form
497 * only when serializing.
498 *
499 * Also, solely for compatibility with previous versions of this
500 * class, it extends AbstractMap, even though all of its methods
501 * are overridden, so it is just useless baggage.
502 *
503 * This file is organized to make things a little easier to follow
504 * while reading than they might otherwise: First the main static
505 * declarations and utilities, then fields, then main public
506 * methods (with a few factorings of multiple public methods into
507 * internal ones), then sizing methods, trees, traversers, and
508 * bulk operations.
509 */
510
/* ---------------- Constants -------------- */

/**
 * The largest possible table capacity.  This value must be
 * exactly 1<<30 to stay within Java array allocation and indexing
 * bounds for power of two table sizes, and is further required
 * because the top two bits of 32bit hash fields are used for
 * control purposes.
 */
private static final int MAXIMUM_CAPACITY = 1 << 30;

/**
 * The default initial table capacity.  Must be a power of 2
 * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
 */
private static final int DEFAULT_CAPACITY = 16;

/**
 * The largest possible (non-power of two) array size.
 * Needed by toArray and related methods.
 * (The 8-word headroom below Integer.MAX_VALUE presumably allows
 * for per-array VM object-header overhead -- limit varies by VM.)
 */
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

/**
 * The default concurrency level for this table. Unused but
 * defined for compatibility with previous versions of this class.
 */
private static final int DEFAULT_CONCURRENCY_LEVEL = 16;

/**
 * The load factor for this table. Overrides of this value in
 * constructors affect only the initial table capacity.  The
 * actual floating point value isn't normally used -- it is
 * simpler to use expressions such as {@code n - (n >>> 2)} for
 * the associated resizing threshold (which equals 0.75 * n).
 */
private static final float LOAD_FACTOR = 0.75f;

/**
 * The bin count threshold for using a tree rather than list for a
 * bin.  Bins are converted to trees when adding an element to a
 * bin with at least this many nodes. The value must be greater
 * than 2, and should be at least 8 to mesh with assumptions in
 * tree removal about conversion back to plain bins upon
 * shrinkage.
 */
static final int TREEIFY_THRESHOLD = 8;

/**
 * The bin count threshold for untreeifying a (split) bin during a
 * resize operation. Should be less than TREEIFY_THRESHOLD, and at
 * most 6 to mesh with shrinkage detection under removal.
 */
static final int UNTREEIFY_THRESHOLD = 6;

/**
 * The smallest table capacity for which bins may be treeified.
 * (Otherwise the table is resized if too many nodes in a bin.)
 * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
 * conflicts between resizing and treeification thresholds.
 */
static final int MIN_TREEIFY_CAPACITY = 64;

/**
 * Minimum number of rebinnings per transfer step. Ranges are
 * subdivided to allow multiple resizer threads.  This value
 * serves as a lower bound to avoid resizers encountering
 * excessive memory contention.  The value should be at least
 * DEFAULT_CAPACITY.
 */
private static final int MIN_TRANSFER_STRIDE = 16;
582
583 /**
584 * The number of bits used for generation stamp in sizeCtl.
585 * Must be at least 6 for 32bit arrays.
586 */
587 private static int RESIZE_STAMP_BITS = 16;
588
589 /**
590 * The maximum number of threads that can help resize.
591 * Must fit in 32 - RESIZE_STAMP_BITS bits.
592 */
593 private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
594
595 /**
596 * The bit shift for recording size stamp in sizeCtl.
597 */
598 private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
599
/*
 * Encodings for Node hash fields. See above for explanation.
 * Negative hashes mark the special (non-entry) node types; normal
 * entries keep their hash's sign bit forced to zero via HASH_BITS.
 */
static final int MOVED = -1; // hash for forwarding nodes
static final int TREEBIN = -2; // hash for roots of trees
static final int RESERVED = -3; // hash for transient reservations
static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash

/** Number of CPUS, to place bounds on some sizings */
static final int NCPU = Runtime.getRuntime().availableProcessors();

/**
 * For serialization compatibility.
 * Mirrors the fields of the earlier segment-based versions of this
 * class; the Segment class itself is instantiated in minimal form
 * only when serializing (see the class overview above).
 */
private static final ObjectStreamField[] serialPersistentFields = {
    new ObjectStreamField("segments", Segment[].class),
    new ObjectStreamField("segmentMask", Integer.TYPE),
    new ObjectStreamField("segmentShift", Integer.TYPE)
};
617
/* ---------------- Nodes -------------- */

/**
 * Key-value entry.  This class is never exported out as a
 * user-mutable Map.Entry (i.e., one supporting setValue; see
 * MapEntry below), but can be used for read-only traversals used
 * in bulk tasks.  Subclasses of Node with a negative hash field
 * are special, and contain null keys and values (but are never
 * exported).  Otherwise, keys and vals are never null.
 */
static class Node<K,V> implements Map.Entry<K,V> {
    final int hash;           // spread hash; negative in special subclasses
    final K key;
    volatile V val;           // volatile: may be updated while readers traverse
    volatile Node<K,V> next;  // volatile: list links change under bin locks

    Node(int hash, K key, V val, Node<K,V> next) {
        this.hash = hash;
        this.key = key;
        this.val = val;
        this.next = next;
    }

    public final K getKey() { return key; }
    public final V getValue() { return val; }
    public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
    public final String toString(){ return key + "=" + val; }
    // Entries are not user-mutable; mutation goes through the map itself.
    public final V setValue(V value) {
        throw new UnsupportedOperationException();
    }

    public final boolean equals(Object o) {
        Object k, v, u; Map.Entry<?,?> e;
        // val is read exactly once (into u) so the comparison uses a
        // single consistent snapshot of this volatile field.
        return ((o instanceof Map.Entry) &&
                (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                (v = e.getValue()) != null &&
                (k == key || k.equals(key)) &&
                (v == (u = val) || v.equals(u)));
    }

    /**
     * Virtualized support for map.get(); overridden in subclasses.
     * Linearly scans this node and its successors for a key equal
     * to k with hash h; returns null if absent or k is null.
     */
    Node<K,V> find(int h, Object k) {
        Node<K,V> e = this;
        if (k != null) {
            do {
                K ek;
                if (e.hash == h &&
                    ((ek = e.key) == k || (ek != null && k.equals(ek))))
                    return e;
            } while ((e = e.next) != null);
        }
        return null;
    }
}
674
675 /* ---------------- Static utilities -------------- */
676
677 /**
678 * Spreads (XORs) higher bits of hash to lower and also forces top
679 * bit to 0. Because the table uses power-of-two masking, sets of
680 * hashes that vary only in bits above the current mask will
681 * always collide. (Among known examples are sets of Float keys
682 * holding consecutive whole numbers in small tables.) So we
683 * apply a transform that spreads the impact of higher bits
684 * downward. There is a tradeoff between speed, utility, and
685 * quality of bit-spreading. Because many common sets of hashes
686 * are already reasonably distributed (so don't benefit from
687 * spreading), and because we use trees to handle large sets of
688 * collisions in bins, we just XOR some shifted bits in the
689 * cheapest possible way to reduce systematic lossage, as well as
690 * to incorporate impact of the highest bits that would otherwise
691 * never be used in index calculations because of table bounds.
692 */
693 static final int spread(int h) {
694 return (h ^ (h >>> 16)) & HASH_BITS;
695 }
696
697 /**
698 * Returns a power of two table size for the given desired capacity.
     * See Hacker's Delight, sec 3.2
700 */
701 private static final int tableSizeFor(int c) {
702 int n = c - 1;
703 n |= n >>> 1;
704 n |= n >>> 2;
705 n |= n >>> 4;
706 n |= n >>> 8;
707 n |= n >>> 16;
708 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
709 }
710
711 /**
712 * Returns x's Class if it is of the form "class C implements
713 * Comparable<C>", else null.
714 */
715 static Class<?> comparableClassFor(Object x) {
716 if (x instanceof Comparable) {
717 Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
718 if ((c = x.getClass()) == String.class) // bypass checks
719 return c;
720 if ((ts = c.getGenericInterfaces()) != null) {
721 for (int i = 0; i < ts.length; ++i) {
722 if (((t = ts[i]) instanceof ParameterizedType) &&
723 ((p = (ParameterizedType)t).getRawType() ==
724 Comparable.class) &&
725 (as = p.getActualTypeArguments()) != null &&
726 as.length == 1 && as[0] == c) // type arg is c
727 return c;
728 }
729 }
730 }
731 return null;
732 }
733
734 /**
735 * Returns k.compareTo(x) if x matches kc (k's screened comparable
736 * class), else 0.
737 */
738 @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
739 static int compareComparables(Class<?> kc, Object k, Object x) {
740 return (x == null || x.getClass() != kc ? 0 :
741 ((Comparable)k).compareTo(x));
742 }
743
744 /* ---------------- Table element access -------------- */
745
746 /*
747 * Volatile access methods are used for table elements as well as
748 * elements of in-progress next table while resizing. All uses of
749 * the tab arguments must be null checked by callers. All callers
750 * also paranoically precheck that tab's length is not zero (or an
751 * equivalent check), thus ensuring that any index argument taking
752 * the form of a hash value anded with (length - 1) is a valid
753 * index. Note that, to be correct wrt arbitrary concurrency
754 * errors by users, these checks must operate on local variables,
755 * which accounts for some odd-looking inline assignments below.
756 * Note that calls to setTabAt always occur within locked regions,
757 * and so in principle require only release ordering, not
758 * full volatile semantics, but are currently coded as volatile
759 * writes to be conservative.
760 */
761
    // Volatile read of tab[i]; sees writes published by setTabAt/casTabAt.
    @SuppressWarnings("unchecked")
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
    }

    // Atomic compare-and-set of tab[i] from c to v.
    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }

    // Volatile write of tab[i]; per the notes above, callers hold the bin
    // lock, so release ordering would suffice, but this is conservative.
    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
    }
775
776 /* ---------------- Fields -------------- */
777
    /**
     * The array of bins. Lazily initialized upon first insertion.
     * Size is always a power of two. Accessed directly by iterators.
     */
    transient volatile Node<K,V>[] table;

    /**
     * The next table to use; non-null only while resizing.
     */
    private transient volatile Node<K,V>[] nextTable;

    /**
     * Base counter value, used mainly when there is no contention,
     * but also as a fallback during table initialization
     * races. Updated via CAS.
     */
    private transient volatile long baseCount;

    /**
     * Table initialization and resizing control. When negative, the
     * table is being initialized or resized: -1 for initialization,
     * else -(1 + the number of active resizing threads). Otherwise,
     * when table is null, holds the initial table size to use upon
     * creation, or 0 for default. After initialization, holds the
     * next element count value upon which to resize the table.
     * NOTE(review): negative values appear to also carry a resize
     * generation stamp in the high RESIZE_STAMP_BITS (see
     * RESIZE_STAMP_SHIFT above) — confirm against the transfer code.
     */
    private transient volatile int sizeCtl;

    /**
     * The next table index (plus one) to split while resizing.
     */
    private transient volatile int transferIndex;

    /**
     * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
     */
    private transient volatile int cellsBusy;

    /**
     * Table of counter cells. When non-null, size is a power of 2.
     */
    private transient volatile CounterCell[] counterCells;

    // Lazily created views; see keySet(), values(), entrySet().
    private transient KeySetView<K,V> keySet;
    private transient ValuesView<K,V> values;
    private transient EntrySetView<K,V> entrySet;
825
826
827 /* ---------------- Public operations -------------- */
828
    /**
     * Creates a new, empty map with the default initial table size (16).
     */
    public ConcurrentHashMapV8() {
        // sizeCtl stays 0 ("use default size"); the bin array itself is
        // allocated lazily on first insertion.
    }
834
835 /**
836 * Creates a new, empty map with an initial table size
837 * accommodating the specified number of elements without the need
838 * to dynamically resize.
839 *
840 * @param initialCapacity The implementation performs internal
841 * sizing to accommodate this many elements.
842 * @throws IllegalArgumentException if the initial capacity of
843 * elements is negative
844 */
845 public ConcurrentHashMapV8(int initialCapacity) {
846 if (initialCapacity < 0)
847 throw new IllegalArgumentException();
848 int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
849 MAXIMUM_CAPACITY :
850 tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
851 this.sizeCtl = cap;
852 }
853
854 /**
855 * Creates a new map with the same mappings as the given map.
856 *
857 * @param m the map
858 */
    public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
        // Start at the default capacity; putAll presizes further if needed.
        this.sizeCtl = DEFAULT_CAPACITY;
        putAll(m);
    }
863
864 /**
865 * Creates a new, empty map with an initial table size based on
866 * the given number of elements ({@code initialCapacity}) and
867 * initial table density ({@code loadFactor}).
868 *
869 * @param initialCapacity the initial capacity. The implementation
870 * performs internal sizing to accommodate this many elements,
871 * given the specified load factor.
872 * @param loadFactor the load factor (table density) for
873 * establishing the initial table size
874 * @throws IllegalArgumentException if the initial capacity of
875 * elements is negative or the load factor is nonpositive
876 *
877 * @since 1.6
878 */
    public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
        this(initialCapacity, loadFactor, 1); // concurrencyLevel of 1
    }
882
883 /**
884 * Creates a new, empty map with an initial table size based on
885 * the given number of elements ({@code initialCapacity}), table
886 * density ({@code loadFactor}), and number of concurrently
887 * updating threads ({@code concurrencyLevel}).
888 *
889 * @param initialCapacity the initial capacity. The implementation
890 * performs internal sizing to accommodate this many elements,
891 * given the specified load factor.
892 * @param loadFactor the load factor (table density) for
893 * establishing the initial table size
894 * @param concurrencyLevel the estimated number of concurrently
895 * updating threads. The implementation may use this value as
896 * a sizing hint.
897 * @throws IllegalArgumentException if the initial capacity is
898 * negative or the load factor or concurrencyLevel are
899 * nonpositive
900 */
901 public ConcurrentHashMapV8(int initialCapacity,
902 float loadFactor, int concurrencyLevel) {
903 if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
904 throw new IllegalArgumentException();
905 if (initialCapacity < concurrencyLevel) // Use at least as many bins
906 initialCapacity = concurrencyLevel; // as estimated threads
907 long size = (long)(1.0 + (long)initialCapacity / loadFactor);
908 int cap = (size >= (long)MAXIMUM_CAPACITY) ?
909 MAXIMUM_CAPACITY : tableSizeFor((int)size);
910 this.sizeCtl = cap;
911 }
912
913 // Original (since JDK1.2) Map methods
914
915 /**
916 * {@inheritDoc}
917 */
918 public int size() {
919 long n = sumCount();
920 return ((n < 0L) ? 0 :
921 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
922 (int)n);
923 }
924
925 /**
926 * {@inheritDoc}
927 */
928 public boolean isEmpty() {
929 return sumCount() <= 0L; // ignore transient negative values
930 }
931
932 /**
933 * Returns the value to which the specified key is mapped,
934 * or {@code null} if this map contains no mapping for the key.
935 *
936 * <p>More formally, if this map contains a mapping from a key
937 * {@code k} to a value {@code v} such that {@code key.equals(k)},
938 * then this method returns {@code v}; otherwise it returns
939 * {@code null}. (There can be at most one such mapping.)
940 *
941 * @throws NullPointerException if the specified key is null
942 */
    public V get(Object key) {
        Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
        int h = spread(key.hashCode()); // NPE here if key is null, per spec
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (e = tabAt(tab, (n - 1) & h)) != null) {
            if ((eh = e.hash) == h) {
                // First node matches on hash; compare by reference, then equals.
                if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                    return e.val;
            }
            else if (eh < 0)
                // Negative hash: special node (forwarding/tree); delegate.
                return (p = e.find(h, key)) != null ? p.val : null;
            // Plain linked bin: scan the rest of the chain.
            while ((e = e.next) != null) {
                if (e.hash == h &&
                    ((ek = e.key) == key || (ek != null && key.equals(ek))))
                    return e.val;
            }
        }
        return null;
    }
962
963 /**
964 * Tests if the specified object is a key in this table.
965 *
966 * @param key possible key
967 * @return {@code true} if and only if the specified object
968 * is a key in this table, as determined by the
969 * {@code equals} method; {@code false} otherwise
970 * @throws NullPointerException if the specified key is null
971 */
    public boolean containsKey(Object key) {
        // Values are never null, so a null result means "absent".
        return get(key) != null;
    }
975
976 /**
977 * Returns {@code true} if this map maps one or more keys to the
978 * specified value. Note: This method may require a full traversal
979 * of the map, and is much slower than method {@code containsKey}.
980 *
981 * @param value value whose presence in this map is to be tested
982 * @return {@code true} if this map maps one or more keys to the
983 * specified value
984 * @throws NullPointerException if the specified value is null
985 */
986 public boolean containsValue(Object value) {
987 if (value == null)
988 throw new NullPointerException();
989 Node<K,V>[] t;
990 if ((t = table) != null) {
991 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
992 for (Node<K,V> p; (p = it.advance()) != null; ) {
993 V v;
994 if ((v = p.val) == value || (v != null && value.equals(v)))
995 return true;
996 }
997 }
998 return false;
999 }
1000
1001 /**
1002 * Maps the specified key to the specified value in this table.
1003 * Neither the key nor the value can be null.
1004 *
1005 * <p>The value can be retrieved by calling the {@code get} method
1006 * with a key that is equal to the original key.
1007 *
1008 * @param key key with which the specified value is to be associated
1009 * @param value value to be associated with the specified key
1010 * @return the previous value associated with {@code key}, or
1011 * {@code null} if there was no mapping for {@code key}
1012 * @throws NullPointerException if the specified key or value is null
1013 */
    public V put(K key, V value) {
        // onlyIfAbsent=false: always overwrite an existing mapping.
        return putVal(key, value, false);
    }
1017
    /**
     * Implementation for put and putIfAbsent.
     *
     * @param key non-null key
     * @param value non-null value
     * @param onlyIfAbsent if true, keep any existing value
     * @return the previous value, or null if there was no mapping
     */
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        if (key == null || value == null) throw new NullPointerException();
        int hash = spread(key.hashCode());
        int binCount = 0; // nodes seen in bin; drives treeification
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                if (casTabAt(tab, i, null,
                             new Node<K,V>(hash, key, value, null)))
                    break; // no lock when adding to empty bin
            }
            else if ((fh = f.hash) == MOVED)
                // Bin is a forwarding node: table is resizing; help, retry.
                tab = helpTransfer(tab, f);
            else {
                V oldVal = null;
                synchronized (f) {
                    if (tabAt(tab, i) == f) { // recheck head under lock
                        if (fh >= 0) {
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    // Append at tail.
                                    pred.next = new Node<K,V>(hash, key,
                                                              value, null);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            Node<K,V> p;
                            binCount = 2;
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                           value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (oldVal != null)
                        return oldVal; // replaced: count unchanged
                    break;
                }
            }
        }
        addCount(1L, binCount); // new mapping added
        return null;
    }
1082
1083 /**
1084 * Copies all of the mappings from the specified map to this one.
1085 * These mappings replace any mappings that this map had for any of the
1086 * keys currently in the specified map.
1087 *
1088 * @param m mappings to be stored in this map
1089 */
1090 public void putAll(Map<? extends K, ? extends V> m) {
1091 tryPresize(m.size());
1092 for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1093 putVal(e.getKey(), e.getValue(), false);
1094 }
1095
1096 /**
1097 * Removes the key (and its corresponding value) from this map.
1098 * This method does nothing if the key is not in the map.
1099 *
1100 * @param key the key that needs to be removed
1101 * @return the previous value associated with {@code key}, or
1102 * {@code null} if there was no mapping for {@code key}
1103 * @throws NullPointerException if the specified key is null
1104 */
    public V remove(Object key) {
        // value == null means delete; cv == null means unconditional.
        return replaceNode(key, null, null);
    }
1108
    /**
     * Implementation for the four public remove/replace methods:
     * Replaces node value with v, conditional upon match of cv if
     * non-null. If resulting value is null, delete.
     *
     * @param key the key
     * @param value the replacement value, or null to delete the node
     * @param cv if non-null, act only when the current value equals cv
     * @return the previous value when a replace/delete happened, else null
     */
    final V replaceNode(Object key, V value, Object cv) {
        int hash = spread(key.hashCode());
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0 ||
                (f = tabAt(tab, i = (n - 1) & hash)) == null)
                break; // empty bin: nothing to remove/replace
            else if ((fh = f.hash) == MOVED)
                // Bin moved during resize: help transfer, then retry.
                tab = helpTransfer(tab, f);
            else {
                V oldVal = null;
                boolean validated = false; // bin head unchanged under lock
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            validated = true;
                            for (Node<K,V> e = f, pred = null;;) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    V ev = e.val;
                                    if (cv == null || cv == ev ||
                                        (ev != null && cv.equals(ev))) {
                                        oldVal = ev;
                                        if (value != null)
                                            e.val = value;
                                        else if (pred != null)
                                            pred.next = e.next; // unlink interior
                                        else
                                            setTabAt(tab, i, e.next); // unlink head
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        else if (f instanceof TreeBin) {
                            validated = true;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(hash, key, null)) != null) {
                                V pv = p.val;
                                if (cv == null || cv == pv ||
                                    (pv != null && cv.equals(pv))) {
                                    oldVal = pv;
                                    if (value != null)
                                        p.val = value;
                                    else if (t.removeTreeNode(p))
                                        // Tree too small: revert to list.
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                    }
                }
                if (validated) {
                    if (oldVal != null) {
                        if (value == null)
                            addCount(-1L, -1); // removed a mapping
                        return oldVal;
                    }
                    break;
                }
                // Not validated: head changed before we locked; retry.
            }
        }
        return null;
    }
1184
    /**
     * Removes all of the mappings from this map. Bins are cleared one
     * at a time under the bin lock, so concurrent updates may proceed
     * in other bins while clearing is in progress.
     */
    public void clear() {
        long delta = 0L; // negative number of deletions
        int i = 0;
        Node<K,V>[] tab = table;
        while (tab != null && i < tab.length) {
            int fh;
            Node<K,V> f = tabAt(tab, i);
            if (f == null)
                ++i;
            else if ((fh = f.hash) == MOVED) {
                tab = helpTransfer(tab, f);
                i = 0; // restart
            }
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        // Count every node in the bin (list or tree chain).
                        Node<K,V> p = (fh >= 0 ? f :
                                       (f instanceof TreeBin) ?
                                       ((TreeBin<K,V>)f).first : null);
                        while (p != null) {
                            --delta;
                            p = p.next;
                        }
                        setTabAt(tab, i++, null);
                    }
                }
            }
        }
        if (delta != 0L)
            addCount(delta, -1);
    }
1219
1220 /**
1221 * Returns a {@link Set} view of the keys contained in this map.
1222 * The set is backed by the map, so changes to the map are
1223 * reflected in the set, and vice-versa. The set supports element
1224 * removal, which removes the corresponding mapping from this map,
1225 * via the {@code Iterator.remove}, {@code Set.remove},
1226 * {@code removeAll}, {@code retainAll}, and {@code clear}
1227 * operations. It does not support the {@code add} or
1228 * {@code addAll} operations.
1229 *
1230 * <p>The view's {@code iterator} is a "weakly consistent" iterator
1231 * that will never throw {@link ConcurrentModificationException},
1232 * and guarantees to traverse elements as they existed upon
1233 * construction of the iterator, and may (but is not guaranteed to)
1234 * reflect any modifications subsequent to construction.
1235 *
1236 * @return the set view
1237 */
1238 public KeySetView<K,V> keySet() {
1239 KeySetView<K,V> ks;
1240 return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
1241 }
1242
1243 /**
1244 * Returns a {@link Collection} view of the values contained in this map.
1245 * The collection is backed by the map, so changes to the map are
1246 * reflected in the collection, and vice-versa. The collection
1247 * supports element removal, which removes the corresponding
1248 * mapping from this map, via the {@code Iterator.remove},
1249 * {@code Collection.remove}, {@code removeAll},
1250 * {@code retainAll}, and {@code clear} operations. It does not
1251 * support the {@code add} or {@code addAll} operations.
1252 *
1253 * <p>The view's {@code iterator} is a "weakly consistent" iterator
1254 * that will never throw {@link ConcurrentModificationException},
1255 * and guarantees to traverse elements as they existed upon
1256 * construction of the iterator, and may (but is not guaranteed to)
1257 * reflect any modifications subsequent to construction.
1258 *
1259 * @return the collection view
1260 */
1261 public Collection<V> values() {
1262 ValuesView<K,V> vs;
1263 return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
1264 }
1265
1266 /**
1267 * Returns a {@link Set} view of the mappings contained in this map.
1268 * The set is backed by the map, so changes to the map are
1269 * reflected in the set, and vice-versa. The set supports element
1270 * removal, which removes the corresponding mapping from the map,
1271 * via the {@code Iterator.remove}, {@code Set.remove},
1272 * {@code removeAll}, {@code retainAll}, and {@code clear}
1273 * operations.
1274 *
1275 * <p>The view's {@code iterator} is a "weakly consistent" iterator
1276 * that will never throw {@link ConcurrentModificationException},
1277 * and guarantees to traverse elements as they existed upon
1278 * construction of the iterator, and may (but is not guaranteed to)
1279 * reflect any modifications subsequent to construction.
1280 *
1281 * @return the set view
1282 */
1283 public Set<Map.Entry<K,V>> entrySet() {
1284 EntrySetView<K,V> es;
1285 return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
1286 }
1287
1288 /**
1289 * Returns the hash code value for this {@link Map}, i.e.,
1290 * the sum of, for each key-value pair in the map,
1291 * {@code key.hashCode() ^ value.hashCode()}.
1292 *
1293 * @return the hash code value for this map
1294 */
1295 public int hashCode() {
1296 int h = 0;
1297 Node<K,V>[] t;
1298 if ((t = table) != null) {
1299 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1300 for (Node<K,V> p; (p = it.advance()) != null; )
1301 h += p.key.hashCode() ^ p.val.hashCode();
1302 }
1303 return h;
1304 }
1305
1306 /**
1307 * Returns a string representation of this map. The string
1308 * representation consists of a list of key-value mappings (in no
1309 * particular order) enclosed in braces ("{@code {}}"). Adjacent
1310 * mappings are separated by the characters {@code ", "} (comma
1311 * and space). Each key-value mapping is rendered as the key
1312 * followed by an equals sign ("{@code =}") followed by the
1313 * associated value.
1314 *
1315 * @return a string representation of this map
1316 */
1317 public String toString() {
1318 Node<K,V>[] t;
1319 int f = (t = table) == null ? 0 : t.length;
1320 Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1321 StringBuilder sb = new StringBuilder();
1322 sb.append('{');
1323 Node<K,V> p;
1324 if ((p = it.advance()) != null) {
1325 for (;;) {
1326 K k = p.key;
1327 V v = p.val;
1328 sb.append(k == this ? "(this Map)" : k);
1329 sb.append('=');
1330 sb.append(v == this ? "(this Map)" : v);
1331 if ((p = it.advance()) == null)
1332 break;
1333 sb.append(',').append(' ');
1334 }
1335 }
1336 return sb.append('}').toString();
1337 }
1338
1339 /**
1340 * Compares the specified object with this map for equality.
1341 * Returns {@code true} if the given object is a map with the same
1342 * mappings as this map. This operation may return misleading
1343 * results if either map is concurrently modified during execution
1344 * of this method.
1345 *
1346 * @param o object to be compared for equality with this map
1347 * @return {@code true} if the specified object is equal to this map
1348 */
1349 public boolean equals(Object o) {
1350 if (o != this) {
1351 if (!(o instanceof Map))
1352 return false;
1353 Map<?,?> m = (Map<?,?>) o;
1354 Node<K,V>[] t;
1355 int f = (t = table) == null ? 0 : t.length;
1356 Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1357 for (Node<K,V> p; (p = it.advance()) != null; ) {
1358 V val = p.val;
1359 Object v = m.get(p.key);
1360 if (v == null || (v != val && !v.equals(val)))
1361 return false;
1362 }
1363 for (Map.Entry<?,?> e : m.entrySet()) {
1364 Object mk, mv, v;
1365 if ((mk = e.getKey()) == null ||
1366 (mv = e.getValue()) == null ||
1367 (v = get(mk)) == null ||
1368 (mv != v && !mv.equals(v)))
1369 return false;
1370 }
1371 }
1372 return true;
1373 }
1374
    /**
     * Stripped-down version of helper class used in previous version,
     * declared for the sake of serialization compatibility. Never used
     * for locking in this implementation; see writeObject/readObject.
     */
    static class Segment<K,V> extends ReentrantLock implements Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        final float loadFactor; // retained solely for the serial form
        Segment(float lf) { this.loadFactor = lf; }
    }
1384
    /**
     * Saves the state of the {@code ConcurrentHashMapV8} instance to a
     * stream (i.e., serializes it).
     * @param s the stream
     * @throws java.io.IOException if an I/O error occurs
     * @serialData
     * the key (Object) and value (Object)
     * for each key-value mapping, followed by a null pair.
     * The key-value mappings are emitted in no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        // For serialization compatibility
        // Emulate segment calculation from previous version of this class
        int sshift = 0;
        int ssize = 1;
        while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
            ++sshift;
            ssize <<= 1;
        }
        int segmentShift = 32 - sshift;
        int segmentMask = ssize - 1;
        // Dummy segments written only to satisfy the legacy serial form.
        @SuppressWarnings("unchecked") Segment<K,V>[] segments = (Segment<K,V>[])
            new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
        for (int i = 0; i < segments.length; ++i)
            segments[i] = new Segment<K,V>(LOAD_FACTOR);
        s.putFields().put("segments", segments);
        s.putFields().put("segmentShift", segmentShift);
        s.putFields().put("segmentMask", segmentMask);
        s.writeFields();

        Node<K,V>[] t;
        if ((t = table) != null) {
            // Weakly consistent traversal of current mappings.
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                s.writeObject(p.key);
                s.writeObject(p.val);
            }
        }
        s.writeObject(null); // null key/value pair terminates the mappings
        s.writeObject(null);
        segments = null; // throw away
    }
1428
    /**
     * Reconstitutes the instance from a stream (that is, deserializes it).
     * @param s the stream
     * @throws ClassNotFoundException if the class of a serialized object
     *         could not be found
     * @throws java.io.IOException if an I/O error occurs
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        /*
         * To improve performance in typical cases, we create nodes
         * while reading, then place in table once size is known.
         * However, we must also validate uniqueness and deal with
         * overpopulated bins while doing so, which requires
         * specialized versions of putVal mechanics.
         */
        sizeCtl = -1; // force exclusion for table construction
        s.defaultReadObject();
        long size = 0L;
        Node<K,V> p = null;
        // Read key/value pairs into a temporary stack of nodes until the
        // terminating null pair (see writeObject's @serialData).
        for (;;) {
            @SuppressWarnings("unchecked") K k = (K) s.readObject();
            @SuppressWarnings("unchecked") V v = (V) s.readObject();
            if (k != null && v != null) {
                p = new Node<K,V>(spread(k.hashCode()), k, v, p);
                ++size;
            }
            else
                break;
        }
        if (size == 0L)
            sizeCtl = 0;
        else {
            int n;
            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
                n = MAXIMUM_CAPACITY;
            else {
                int sz = (int)size;
                n = tableSizeFor(sz + (sz >>> 1) + 1);
            }
            @SuppressWarnings("unchecked")
            Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
            int mask = n - 1;
            long added = 0L; // counts only unique keys actually inserted
            while (p != null) {
                boolean insertAtFront;
                Node<K,V> next = p.next, first;
                int h = p.hash, j = h & mask;
                if ((first = tabAt(tab, j)) == null)
                    insertAtFront = true;
                else {
                    K k = p.key;
                    if (first.hash < 0) {
                        // Bin already treeified; tree handles duplicates.
                        TreeBin<K,V> t = (TreeBin<K,V>)first;
                        if (t.putTreeVal(h, k, p.val) == null)
                            ++added;
                        insertAtFront = false;
                    }
                    else {
                        // Scan list bin for a duplicate key.
                        int binCount = 0;
                        insertAtFront = true;
                        Node<K,V> q; K qk;
                        for (q = first; q != null; q = q.next) {
                            if (q.hash == h &&
                                ((qk = q.key) == k ||
                                 (qk != null && k.equals(qk)))) {
                                insertAtFront = false;
                                break;
                            }
                            ++binCount;
                        }
                        if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
                            // Bin is too long: convert it to a tree now.
                            insertAtFront = false;
                            ++added;
                            p.next = first;
                            TreeNode<K,V> hd = null, tl = null;
                            for (q = p; q != null; q = q.next) {
                                TreeNode<K,V> t = new TreeNode<K,V>
                                    (q.hash, q.key, q.val, null, null);
                                if ((t.prev = tl) == null)
                                    hd = t;
                                else
                                    tl.next = t;
                                tl = t;
                            }
                            setTabAt(tab, j, new TreeBin<K,V>(hd));
                        }
                    }
                }
                if (insertAtFront) {
                    ++added;
                    p.next = first;
                    setTabAt(tab, j, p);
                }
                p = next;
            }
            table = tab;
            sizeCtl = n - (n >>> 2); // next resize threshold (0.75 * n)
            baseCount = added;
        }
    }
1530
1531 // ConcurrentMap methods
1532
1533 /**
1534 * {@inheritDoc}
1535 *
1536 * @return the previous value associated with the specified key,
1537 * or {@code null} if there was no mapping for the key
1538 * @throws NullPointerException if the specified key or value is null
1539 */
    public V putIfAbsent(K key, V value) {
        // onlyIfAbsent=true: keep any existing value.
        return putVal(key, value, true);
    }
1543
1544 /**
1545 * {@inheritDoc}
1546 *
1547 * @throws NullPointerException if the specified key is null
1548 */
1549 public boolean remove(Object key, Object value) {
1550 if (key == null)
1551 throw new NullPointerException();
1552 return value != null && replaceNode(key, null, value) != null;
1553 }
1554
1555 /**
1556 * {@inheritDoc}
1557 *
1558 * @throws NullPointerException if any of the arguments are null
1559 */
1560 public boolean replace(K key, V oldValue, V newValue) {
1561 if (key == null || oldValue == null || newValue == null)
1562 throw new NullPointerException();
1563 return replaceNode(key, newValue, oldValue) != null;
1564 }
1565
1566 /**
1567 * {@inheritDoc}
1568 *
1569 * @return the previous value associated with the specified key,
1570 * or {@code null} if there was no mapping for the key
1571 * @throws NullPointerException if the specified key or value is null
1572 */
    public V replace(K key, V value) {
        if (key == null || value == null)
            throw new NullPointerException();
        // cv == null: replace unconditionally, but only if a mapping exists.
        return replaceNode(key, value, null);
    }
1578
1579 // Overrides of JDK8+ Map extension method defaults
1580
1581 /**
1582 * Returns the value to which the specified key is mapped, or the
1583 * given default value if this map contains no mapping for the
1584 * key.
1585 *
1586 * @param key the key whose associated value is to be returned
1587 * @param defaultValue the value to return if this map contains
1588 * no mapping for the given key
1589 * @return the mapping for the key, if present; else the default value
1590 * @throws NullPointerException if the specified key is null
1591 */
1592 public V getOrDefault(Object key, V defaultValue) {
1593 V v;
1594 return (v = get(key)) == null ? defaultValue : v;
1595 }
1596
1597 public void forEach(BiAction<? super K, ? super V> action) {
1598 if (action == null) throw new NullPointerException();
1599 Node<K,V>[] t;
1600 if ((t = table) != null) {
1601 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1602 for (Node<K,V> p; (p = it.advance()) != null; ) {
1603 action.apply(p.key, p.val);
1604 }
1605 }
1606 }
1607
    /**
     * Replaces each entry's value with the result of invoking the given
     * function on that entry until all entries have been processed or
     * the function throws. Each replacement is a CAS-like conditional
     * replace retried until it succeeds or the key disappears.
     *
     * @param function the function to apply to each entry
     * @throws NullPointerException if function or any computed value is null
     */
    public void replaceAll(BiFun<? super K, ? super V, ? extends V> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V oldValue = p.val;
                for (K key = p.key;;) {
                    V newValue = function.apply(key, oldValue);
                    if (newValue == null)
                        throw new NullPointerException();
                    // Succeeded, or mapping vanished: move on. Otherwise a
                    // concurrent update changed the value; reread and retry.
                    if (replaceNode(key, newValue, oldValue) != null ||
                        (oldValue = get(key)) == null)
                        break;
                }
            }
        }
    }
1626
1627 /**
1628 * If the specified key is not already associated with a value,
1629 * attempts to compute its value using the given mapping function
1630 * and enters it into this map unless {@code null}. The entire
1631 * method invocation is performed atomically, so the function is
1632 * applied at most once per key. Some attempted update operations
1633 * on this map by other threads may be blocked while computation
1634 * is in progress, so the computation should be short and simple,
1635 * and must not attempt to update any other mappings of this map.
1636 *
1637 * @param key key with which the specified value is to be associated
1638 * @param mappingFunction the function to compute a value
1639 * @return the current (existing or computed) value associated with
1640 * the specified key, or null if the computed value is null
1641 * @throws NullPointerException if the specified key or mappingFunction
1642 * is null
1643 * @throws IllegalStateException if the computation detectably
1644 * attempts a recursive update to this map that would
1645 * otherwise never complete
1646 * @throws RuntimeException or Error if the mappingFunction does so,
1647 * in which case the mapping is left unestablished
1648 */
1649 public V computeIfAbsent(K key, Fun<? super K, ? extends V> mappingFunction) {
1650 if (key == null || mappingFunction == null)
1651 throw new NullPointerException();
1652 int h = spread(key.hashCode());
1653 V val = null;
1654 int binCount = 0;
1655 for (Node<K,V>[] tab = table;;) {
1656 Node<K,V> f; int n, i, fh;
1657 if (tab == null || (n = tab.length) == 0)
1658 tab = initTable();
1659 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1660 Node<K,V> r = new ReservationNode<K,V>();
1661 synchronized (r) {
1662 if (casTabAt(tab, i, null, r)) {
1663 binCount = 1;
1664 Node<K,V> node = null;
1665 try {
1666 if ((val = mappingFunction.apply(key)) != null)
1667 node = new Node<K,V>(h, key, val, null);
1668 } finally {
1669 setTabAt(tab, i, node);
1670 }
1671 }
1672 }
1673 if (binCount != 0)
1674 break;
1675 }
1676 else if ((fh = f.hash) == MOVED)
1677 tab = helpTransfer(tab, f);
1678 else {
1679 boolean added = false;
1680 synchronized (f) {
1681 if (tabAt(tab, i) == f) {
1682 if (fh >= 0) {
1683 binCount = 1;
1684 for (Node<K,V> e = f;; ++binCount) {
1685 K ek; V ev;
1686 if (e.hash == h &&
1687 ((ek = e.key) == key ||
1688 (ek != null && key.equals(ek)))) {
1689 val = e.val;
1690 break;
1691 }
1692 Node<K,V> pred = e;
1693 if ((e = e.next) == null) {
1694 if ((val = mappingFunction.apply(key)) != null) {
1695 added = true;
1696 pred.next = new Node<K,V>(h, key, val, null);
1697 }
1698 break;
1699 }
1700 }
1701 }
1702 else if (f instanceof TreeBin) {
1703 binCount = 2;
1704 TreeBin<K,V> t = (TreeBin<K,V>)f;
1705 TreeNode<K,V> r, p;
1706 if ((r = t.root) != null &&
1707 (p = r.findTreeNode(h, key, null)) != null)
1708 val = p.val;
1709 else if ((val = mappingFunction.apply(key)) != null) {
1710 added = true;
1711 t.putTreeVal(h, key, val);
1712 }
1713 }
1714 }
1715 }
1716 if (binCount != 0) {
1717 if (binCount >= TREEIFY_THRESHOLD)
1718 treeifyBin(tab, i);
1719 if (!added)
1720 return val;
1721 break;
1722 }
1723 }
1724 }
1725 if (val != null)
1726 addCount(1L, binCount);
1727 return val;
1728 }
1729
    /**
     * If the value for the specified key is present, attempts to
     * compute a new mapping given the key and its current mapped
     * value. The entire method invocation is performed atomically.
     * Some attempted update operations on this map by other threads
     * may be blocked while computation is in progress, so the
     * computation should be short and simple, and must not attempt to
     * update any other mappings of this map.
     *
     * @param key key with which a value may be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     * is null
     * @throws IllegalStateException if the computation detectably
     * attempts a recursive update to this map that would
     * otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     * in which case the mapping is unchanged
     */
    public V computeIfPresent(K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;     // change in mapping count: 0 or -1 (removal)
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
                break;  // key absent; nothing to compute
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            // Linked-list bin: find the node; update its value
                            // or unlink it when the function returns null.
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(key, e.val);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        else if (f instanceof TreeBin) {
                            // Tree bin: same logic on the red-black tree;
                            // untreeify if removal shrinks the bin enough.
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(h, key, null)) != null) {
                                val = remappingFunction.apply(key, p.val);
                                if (val != null)
                                    p.val = val;
                                else {
                                    delta = -1;
                                    if (t.removeTreeNode(p))
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
1819
    /**
     * Attempts to compute a mapping for the specified key and its
     * current mapped value (or {@code null} if there is no current
     * mapping). The entire method invocation is performed atomically.
     * Some attempted update operations on this map by other threads
     * may be blocked while computation is in progress, so the
     * computation should be short and simple, and must not attempt to
     * update any other mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     * is null
     * @throws IllegalStateException if the computation detectably
     * attempts a recursive update to this map that would
     * otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     * in which case the mapping is unchanged
     */
    public V compute(K key,
                     BiFun<? super K, ? super V, ? extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;     // change in mapping count: -1, 0, or +1
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: claim with a reservation while the user
                // function computes a value for the absent key.
                Node<K,V> r = new ReservationNode<K,V>();
                synchronized (r) {
                    if (casTabAt(tab, i, null, r)) {
                        binCount = 1;
                        Node<K,V> node = null;
                        try {
                            if ((val = remappingFunction.apply(key, null)) != null) {
                                delta = 1;
                                node = new Node<K,V>(h, key, val, null);
                            }
                        } finally {
                            // Replace the reservation even if the function threw.
                            setTabAt(tab, i, node);
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            // Linked-list bin: remap an existing node, unlink
                            // it on null result, or append a new node.
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(key, e.val);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    val = remappingFunction.apply(key, null);
                                    if (val != null) {
                                        delta = 1;
                                        pred.next =
                                            new Node<K,V>(h, key, val, null);
                                    }
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 1;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null)
                                p = r.findTreeNode(h, key, null);
                            else
                                p = null;
                            V pv = (p == null) ? null : p.val;
                            val = remappingFunction.apply(key, pv);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
1945
    /**
     * If the specified key is not already associated with a
     * (non-null) value, associates it with the given value.
     * Otherwise, replaces the value with the results of the given
     * remapping function, or removes if {@code null}. The entire
     * method invocation is performed atomically. Some attempted
     * update operations on this map by other threads may be blocked
     * while computation is in progress, so the computation should be
     * short and simple, and must not attempt to update any other
     * mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param value the value to use if absent
     * @param remappingFunction the function to recompute a value if present
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or the
     * remappingFunction is null
     * @throws RuntimeException or Error if the remappingFunction does so,
     * in which case the mapping is unchanged
     */
    public V merge(K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
        if (key == null || value == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;     // change in mapping count: -1, 0, or +1
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin and no existing mapping: the function is not
                // called; install the given value directly via CAS.
                if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) {
                    delta = 1;
                    val = value;
                    break;
                }
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            // Linked-list bin: merge into an existing node,
                            // unlink it on null result, or append the value.
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(e.val, value);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    delta = 1;
                                    val = value;
                                    pred.next =
                                        new Node<K,V>(h, key, val, null);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r = t.root;
                            TreeNode<K,V> p = (r == null) ? null :
                                r.findTreeNode(h, key, null);
                            // Absent key takes the given value as-is.
                            val = (p == null) ? value :
                                remappingFunction.apply(p.val, value);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
2054
2055 // Hashtable legacy methods
2056
    /**
     * Legacy method testing if some key maps into the specified value
     * in this table. This method is identical in functionality to
     * {@link #containsValue(Object)}, and exists solely to ensure
     * full compatibility with class {@link java.util.Hashtable},
     * which supported this method prior to introduction of the
     * Java Collections framework.
     *
     * @param value a value to search for
     * @return {@code true} if and only if some key maps to the
     * {@code value} argument in this table as
     * determined by the {@code equals} method;
     * {@code false} otherwise
     * @throws NullPointerException if the specified value is null
     */
    @Deprecated public boolean contains(Object value) {
        return containsValue(value); // pure delegation; kept for Hashtable compatibility
    }
2075
2076 /**
2077 * Returns an enumeration of the keys in this table.
2078 *
2079 * @return an enumeration of the keys in this table
2080 * @see #keySet()
2081 */
2082 public Enumeration<K> keys() {
2083 Node<K,V>[] t;
2084 int f = (t = table) == null ? 0 : t.length;
2085 return new KeyIterator<K,V>(t, f, 0, f, this);
2086 }
2087
2088 /**
2089 * Returns an enumeration of the values in this table.
2090 *
2091 * @return an enumeration of the values in this table
2092 * @see #values()
2093 */
2094 public Enumeration<V> elements() {
2095 Node<K,V>[] t;
2096 int f = (t = table) == null ? 0 : t.length;
2097 return new ValueIterator<K,V>(t, f, 0, f, this);
2098 }
2099
2100 // ConcurrentHashMapV8-only methods
2101
2102 /**
2103 * Returns the number of mappings. This method should be used
2104 * instead of {@link #size} because a ConcurrentHashMapV8 may
2105 * contain more mappings than can be represented as an int. The
2106 * value returned is an estimate; the actual count may differ if
2107 * there are concurrent insertions or removals.
2108 *
2109 * @return the number of mappings
2110 * @since 1.8
2111 */
2112 public long mappingCount() {
2113 long n = sumCount();
2114 return (n < 0L) ? 0L : n; // ignore transient negative values
2115 }
2116
2117 /**
2118 * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2119 * from the given type to {@code Boolean.TRUE}.
2120 *
2121 * @return the new set
2122 * @since 1.8
2123 */
2124 public static <K> KeySetView<K,Boolean> newKeySet() {
2125 return new KeySetView<K,Boolean>
2126 (new ConcurrentHashMapV8<K,Boolean>(), Boolean.TRUE);
2127 }
2128
2129 /**
2130 * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2131 * from the given type to {@code Boolean.TRUE}.
2132 *
2133 * @param initialCapacity The implementation performs internal
2134 * sizing to accommodate this many elements.
2135 * @return the new set
2136 * @throws IllegalArgumentException if the initial capacity of
2137 * elements is negative
2138 * @since 1.8
2139 */
2140 public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2141 return new KeySetView<K,Boolean>
2142 (new ConcurrentHashMapV8<K,Boolean>(initialCapacity), Boolean.TRUE);
2143 }
2144
2145 /**
2146 * Returns a {@link Set} view of the keys in this map, using the
2147 * given common mapped value for any additions (i.e., {@link
2148 * Collection#add} and {@link Collection#addAll(Collection)}).
2149 * This is of course only appropriate if it is acceptable to use
2150 * the same value for all additions from this view.
2151 *
2152 * @param mappedValue the mapped value to use for any additions
2153 * @return the set view
2154 * @throws NullPointerException if the mappedValue is null
2155 */
2156 public KeySetView<K,V> keySet(V mappedValue) {
2157 if (mappedValue == null)
2158 throw new NullPointerException();
2159 return new KeySetView<K,V>(this, mappedValue);
2160 }
2161
2162 /* ---------------- Special Nodes -------------- */
2163
    /**
     * A node inserted at head of bins during transfer operations.
     */
    static final class ForwardingNode<K,V> extends Node<K,V> {
        final Node<K,V>[] nextTable;   // the table being transferred into
        ForwardingNode(Node<K,V>[] tab) {
            super(MOVED, null, null, null); // MOVED hash marks a forwarded bin
            this.nextTable = tab;
        }

        /** Continues the search for (h, k) in the table this node forwards to. */
        Node<K,V> find(int h, Object k) {
            // loop to avoid arbitrarily deep recursion on forwarding nodes
            outer: for (Node<K,V>[] tab = nextTable;;) {
                Node<K,V> e; int n;
                if (k == null || tab == null || (n = tab.length) == 0 ||
                    (e = tabAt(tab, (n - 1) & h)) == null)
                    return null;
                for (;;) {
                    int eh; K ek;
                    if ((eh = e.hash) == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                    if (eh < 0) {
                        // Negative hash marks a special node: chase a nested
                        // forwarding iteratively; otherwise (e.g. TreeBin)
                        // delegate to that node's own find.
                        if (e instanceof ForwardingNode) {
                            tab = ((ForwardingNode<K,V>)e).nextTable;
                            continue outer;
                        }
                        else
                            return e.find(h, k);
                    }
                    if ((e = e.next) == null)
                        return null;
                }
            }
        }
    }
2200
    /**
     * A place-holder node used in computeIfAbsent and compute
     */
    static final class ReservationNode<K,V> extends Node<K,V> {
        ReservationNode() {
            // RESERVED hash marks the bin as claimed while a user
            // function computes a value under this node's monitor.
            super(RESERVED, null, null, null);
        }

        // A reservation holds no mapping yet, so searches never match it.
        Node<K,V> find(int h, Object k) {
            return null;
        }
    }
2213
2214 /* ---------------- Table Initialization and Resizing -------------- */
2215
    /**
     * Returns the stamp bits for resizing a table of size n.
     * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
     */
    static final int resizeStamp(int n) {
        // numberOfLeadingZeros(n) uniquely identifies each power-of-two
        // table size; OR-ing in bit (RESIZE_STAMP_BITS - 1) ensures the
        // sign bit is set once the stamp is shifted into sizeCtl, so a
        // negative sizeCtl always denotes an in-progress resize.
        return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
    }
2223
    /**
     * Initializes table, using the size recorded in sizeCtl.
     * A positive sizeCtl is the requested capacity; -1 signals that
     * another thread is initializing.
     */
    private final Node<K,V>[] initTable() {
        Node<K,V>[] tab; int sc;
        while ((tab = table) == null || tab.length == 0) {
            if ((sc = sizeCtl) < 0)
                Thread.yield(); // lost initialization race; just spin
            else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                // Won the race: -1 in sizeCtl announces initialization.
                try {
                    if ((tab = table) == null || tab.length == 0) {
                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                        @SuppressWarnings("unchecked")
                        Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                        table = tab = nt;
                        sc = n - (n >>> 2); // next resize threshold (0.75 * n)
                    }
                } finally {
                    sizeCtl = sc;
                }
                break;
            }
        }
        return tab;
    }
2249
2250 /**
2251 * Adds to count, and if table is too small and not already
2252 * resizing, initiates transfer. If already resizing, helps
2253 * perform transfer if work is available. Rechecks occupancy
2254 * after a transfer to see if another resize is already needed
2255 * because resizings are lagging additions.
2256 *
2257 * @param x the count to add
2258 * @param check if <0, don't check resize, if <= 1 only check if uncontended
2259 */
2260 private final void addCount(long x, int check) {
2261 CounterCell[] as; long b, s;
2262 if ((as = counterCells) != null ||
2263 !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
2264 CounterHashCode hc; CounterCell a; long v; int m;
2265 boolean uncontended = true;
2266 if ((hc = threadCounterHashCode.get()) == null ||
2267 as == null || (m = as.length - 1) < 0 ||
2268 (a = as[m & hc.code]) == null ||
2269 !(uncontended =
2270 U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
2271 fullAddCount(x, hc, uncontended);
2272 return;
2273 }
2274 if (check <= 1)
2275 return;
2276 s = sumCount();
2277 }
2278 if (check >= 0) {
2279 Node<K,V>[] tab, nt; int n, sc;
2280 while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
2281 (n = tab.length) < MAXIMUM_CAPACITY) {
2282 int rs = resizeStamp(n);
2283 if (sc < 0) {
2284 if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
2285 sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
2286 transferIndex <= 0)
2287 break;
2288 if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
2289 transfer(tab, nt);
2290 }
2291 else if (U.compareAndSwapInt(this, SIZECTL, sc,
2292 (rs << RESIZE_STAMP_SHIFT) + 2))
2293 transfer(tab, null);
2294 s = sumCount();
2295 }
2296 }
2297 }
2298
2299 /**
2300 * Helps transfer if a resize is in progress.
2301 */
2302 final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
2303 Node<K,V>[] nextTab; int sc;
2304 if (tab != null && (f instanceof ForwardingNode) &&
2305 (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
2306 int rs = resizeStamp(tab.length);
2307 while (nextTab == nextTable && table == tab &&
2308 (sc = sizeCtl) < 0) {
2309 if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
2310 sc == rs + MAX_RESIZERS || transferIndex <= 0)
2311 break;
2312 if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
2313 transfer(tab, nextTab);
2314 break;
2315 }
2316 }
2317 return nextTab;
2318 }
2319 return table;
2320 }
2321
2322 /**
2323 * Tries to presize table to accommodate the given number of elements.
2324 *
2325 * @param size number of elements (doesn't need to be perfectly accurate)
2326 */
2327 private final void tryPresize(int size) {
2328 int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2329 tableSizeFor(size + (size >>> 1) + 1);
2330 int sc;
2331 while ((sc = sizeCtl) >= 0) {
2332 Node<K,V>[] tab = table; int n;
2333 if (tab == null || (n = tab.length) == 0) {
2334 n = (sc > c) ? sc : c;
2335 if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
2336 try {
2337 if (table == tab) {
2338 @SuppressWarnings("unchecked")
2339 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
2340 table = nt;
2341 sc = n - (n >>> 2);
2342 }
2343 } finally {
2344 sizeCtl = sc;
2345 }
2346 }
2347 }
2348 else if (c <= sc || n >= MAXIMUM_CAPACITY)
2349 break;
2350 else if (tab == table) {
2351 int rs = resizeStamp(n);
2352 if (sc < 0) {
2353 Node<K,V>[] nt;
2354 if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
2355 sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
2356 transferIndex <= 0)
2357 break;
2358 if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
2359 transfer(tab, nt);
2360 }
2361 else if (U.compareAndSwapInt(this, SIZECTL, sc,
2362 (rs << RESIZE_STAMP_SHIFT) + 2))
2363 transfer(tab, null);
2364 }
2365 }
2366 }
2367
2368 /**
2369 * Moves and/or copies the nodes in each bin to new table. See
2370 * above for explanation.
2371 */
2372 private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
2373 int n = tab.length, stride;
2374 if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
2375 stride = MIN_TRANSFER_STRIDE; // subdivide range
2376 if (nextTab == null) { // initiating
2377 try {
2378 @SuppressWarnings("unchecked")
2379 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
2380 nextTab = nt;
2381 } catch (Throwable ex) { // try to cope with OOME
2382 sizeCtl = Integer.MAX_VALUE;
2383 return;
2384 }
2385 nextTable = nextTab;
2386 transferIndex = n;
2387 }
2388 int nextn = nextTab.length;
2389 ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
2390 boolean advance = true;
2391 boolean finishing = false; // to ensure sweep before committing nextTab
2392 for (int i = 0, bound = 0;;) {
2393 Node<K,V> f; int fh;
2394 while (advance) {
2395 int nextIndex, nextBound;
2396 if (--i >= bound || finishing)
2397 advance = false;
2398 else if ((nextIndex = transferIndex) <= 0) {
2399 i = -1;
2400 advance = false;
2401 }
2402 else if (U.compareAndSwapInt
2403 (this, TRANSFERINDEX, nextIndex,
2404 nextBound = (nextIndex > stride ?
2405 nextIndex - stride : 0))) {
2406 bound = nextBound;
2407 i = nextIndex - 1;
2408 advance = false;
2409 }
2410 }
2411 if (i < 0 || i >= n || i + n >= nextn) {
2412 int sc;
2413 if (finishing) {
2414 nextTable = null;
2415 table = nextTab;
2416 sizeCtl = (n << 1) - (n >>> 1);
2417 return;
2418 }
2419 if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
2420 if ((sc - 2) != resizeStamp(n))
2421 return;
2422 finishing = advance = true;
2423 i = n; // recheck before commit
2424 }
2425 }
2426 else if ((f = tabAt(tab, i)) == null)
2427 advance = casTabAt(tab, i, null, fwd);
2428 else if ((fh = f.hash) == MOVED)
2429 advance = true; // already processed
2430 else {
2431 synchronized (f) {
2432 if (tabAt(tab, i) == f) {
2433 Node<K,V> ln, hn;
2434 if (fh >= 0) {
2435 int runBit = fh & n;
2436 Node<K,V> lastRun = f;
2437 for (Node<K,V> p = f.next; p != null; p = p.next) {
2438 int b = p.hash & n;
2439 if (b != runBit) {
2440 runBit = b;
2441 lastRun = p;
2442 }
2443 }
2444 if (runBit == 0) {
2445 ln = lastRun;
2446 hn = null;
2447 }
2448 else {
2449 hn = lastRun;
2450 ln = null;
2451 }
2452 for (Node<K,V> p = f; p != lastRun; p = p.next) {
2453 int ph = p.hash; K pk = p.key; V pv = p.val;
2454 if ((ph & n) == 0)
2455 ln = new Node<K,V>(ph, pk, pv, ln);
2456 else
2457 hn = new Node<K,V>(ph, pk, pv, hn);
2458 }
2459 setTabAt(nextTab, i, ln);
2460 setTabAt(nextTab, i + n, hn);
2461 setTabAt(tab, i, fwd);
2462 advance = true;
2463 }
2464 else if (f instanceof TreeBin) {
2465 TreeBin<K,V> t = (TreeBin<K,V>)f;
2466 TreeNode<K,V> lo = null, loTail = null;
2467 TreeNode<K,V> hi = null, hiTail = null;
2468 int lc = 0, hc = 0;
2469 for (Node<K,V> e = t.first; e != null; e = e.next) {
2470 int h = e.hash;
2471 TreeNode<K,V> p = new TreeNode<K,V>
2472 (h, e.key, e.val, null, null);
2473 if ((h & n) == 0) {
2474 if ((p.prev = loTail) == null)
2475 lo = p;
2476 else
2477 loTail.next = p;
2478 loTail = p;
2479 ++lc;
2480 }
2481 else {
2482 if ((p.prev = hiTail) == null)
2483 hi = p;
2484 else
2485 hiTail.next = p;
2486 hiTail = p;
2487 ++hc;
2488 }
2489 }
2490 ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
2491 (hc != 0) ? new TreeBin<K,V>(lo) : t;
2492 hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
2493 (lc != 0) ? new TreeBin<K,V>(hi) : t;
2494 setTabAt(nextTab, i, ln);
2495 setTabAt(nextTab, i + n, hn);
2496 setTabAt(tab, i, fwd);
2497 advance = true;
2498 }
2499 }
2500 }
2501 }
2502 }
2503 }
2504
2505 /* ---------------- Conversion from/to TreeBins -------------- */
2506
2507 /**
2508 * Replaces all linked nodes in bin at given index unless table is
2509 * too small, in which case resizes instead.
2510 */
2511 private final void treeifyBin(Node<K,V>[] tab, int index) {
2512 Node<K,V> b; int n, sc;
2513 if (tab != null) {
2514 if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
2515 tryPresize(n << 1);
2516 else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
2517 synchronized (b) {
2518 if (tabAt(tab, index) == b) {
2519 TreeNode<K,V> hd = null, tl = null;
2520 for (Node<K,V> e = b; e != null; e = e.next) {
2521 TreeNode<K,V> p =
2522 new TreeNode<K,V>(e.hash, e.key, e.val,
2523 null, null);
2524 if ((p.prev = tl) == null)
2525 hd = p;
2526 else
2527 tl.next = p;
2528 tl = p;
2529 }
2530 setTabAt(tab, index, new TreeBin<K,V>(hd));
2531 }
2532 }
2533 }
2534 }
2535 }
2536 /**
2537 * Returns a list on non-TreeNodes replacing those in given list.
2538 */
2539 static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2540 Node<K,V> hd = null, tl = null;
2541 for (Node<K,V> q = b; q != null; q = q.next) {
2542 Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
2543 if (tl == null)
2544 hd = p;
2545 else
2546 tl.next = p;
2547 tl = p;
2548 }
2549 return hd;
2550 }
2551
2552 /* ---------------- TreeNodes -------------- */
2553
    /**
     * Nodes for use in TreeBins
     */
    static final class TreeNode<K,V> extends Node<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;

        TreeNode(int hash, K key, V val, Node<K,V> next,
                 TreeNode<K,V> parent) {
            super(hash, key, val, next);
            this.parent = parent;
        }

        Node<K,V> find(int h, Object k) {
            return findTreeNode(h, k, null);
        }

        /**
         * Returns the TreeNode (or null if not found) for the given key
         * starting at given root.
         *
         * @param h the key's spread hash
         * @param k the key
         * @param kc cached comparable class of k, or null if not yet computed
         */
        final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
            if (k != null) {
                TreeNode<K,V> p = this;
                do {
                    int ph, dir; K pk; TreeNode<K,V> q;
                    TreeNode<K,V> pl = p.left, pr = p.right;
                    if ((ph = p.hash) > h)
                        p = pl;
                    else if (ph < h)
                        p = pr;
                    else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                        return p;
                    // Equal hash but unequal key: if only one subtree exists,
                    // descend into it; otherwise try Comparable ordering, and
                    // as a last resort search the right subtree recursively
                    // before continuing down the left.
                    else if (pl == null)
                        p = pr;
                    else if (pr == null)
                        p = pl;
                    else if ((kc != null ||
                              (kc = comparableClassFor(k)) != null) &&
                             (dir = compareComparables(kc, k, pk)) != 0)
                        p = (dir < 0) ? pl : pr;
                    else if ((q = pr.findTreeNode(h, k, kc)) != null)
                        return q;
                    else
                        p = pl;
                } while (p != null);
            }
            return null;
        }
    }
2607
2608 /* ---------------- TreeBins -------------- */
2609
2610 /**
2611 * TreeNodes used at the heads of bins. TreeBins do not hold user
2612 * keys or values, but instead point to list of TreeNodes and
2613 * their root. They also maintain a parasitic read-write lock
2614 * forcing writers (who hold bin lock) to wait for readers (who do
2615 * not) to complete before tree restructuring operations.
2616 */
2617 static final class TreeBin<K,V> extends Node<K,V> {
2618 TreeNode<K,V> root;
2619 volatile TreeNode<K,V> first;
2620 volatile Thread waiter;
2621 volatile int lockState;
2622 // values for lockState
2623 static final int WRITER = 1; // set while holding write lock
2624 static final int WAITER = 2; // set when waiting for write lock
2625 static final int READER = 4; // increment value for setting read lock
2626
        /**
         * Tie-breaking utility for ordering insertions when equal
         * hashCodes and non-comparable. We don't require a total
         * order, just a consistent insertion rule to maintain
         * equivalence across rebalancings. Tie-breaking further than
         * necessary simplifies testing a bit.
         */
        static int tieBreakOrder(Object a, Object b) {
            int d;
            // Order by class name first; when names match (or either
            // argument is null), fall back to identity hash codes,
            // breaking exact ties arbitrarily but consistently as -1.
            if (a == null || b == null ||
                (d = a.getClass().getName().
                 compareTo(b.getClass().getName())) == 0)
                d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
                     -1 : 1);
            return d;
        }
2643
        /**
         * Creates bin with initial set of nodes headed by b.
         * Links the nodes into a red-black tree ordered by hash, then
         * Comparable order, then tieBreakOrder.
         */
        TreeBin(TreeNode<K,V> b) {
            super(TREEBIN, null, null, null); // TREEBIN hash marks a tree bin
            this.first = b;
            TreeNode<K,V> r = null; // tree root built incrementally
            for (TreeNode<K,V> x = b, next; x != null; x = next) {
                next = (TreeNode<K,V>)x.next;
                x.left = x.right = null;
                if (x == null) { } // (unreachable; loop guard excludes null)
                if (r == null) {
                    x.parent = null;
                    x.red = false; // root is black
                    r = x;
                }
                else {
                    K k = x.key;
                    int h = x.hash;
                    Class<?> kc = null; // lazily computed comparable class of k
                    for (TreeNode<K,V> p = r;;) {
                        int dir, ph;
                        K pk = p.key;
                        if ((ph = p.hash) > h)
                            dir = -1;
                        else if (ph < h)
                            dir = 1;
                        else if ((kc == null &&
                                  (kc = comparableClassFor(k)) == null) ||
                                 (dir = compareComparables(kc, k, pk)) == 0)
                            dir = tieBreakOrder(k, pk);
                        TreeNode<K,V> xp = p;
                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
                            // Found the insertion point; attach and rebalance.
                            x.parent = xp;
                            if (dir <= 0)
                                xp.left = x;
                            else
                                xp.right = x;
                            r = balanceInsertion(r, x);
                            break;
                        }
                    }
                }
            }
            this.root = r;
            assert checkInvariants(root);
        }
2690
2691 /**
2692 * Acquires write lock for tree restructuring.
2693 */
2694 private final void lockRoot() {
2695 if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
2696 contendedLock(); // offload to separate method
2697 }
2698
2699 /**
2700 * Releases write lock for tree restructuring.
2701 */
2702 private final void unlockRoot() {
2703 lockState = 0;
2704 }
2705
2706 /**
2707 * Possibly blocks awaiting root lock.
2708 */
2709 private final void contendedLock() {
2710 boolean waiting = false;
2711 for (int s;;) {
2712 if (((s = lockState) & ~WAITER) == 0) {
2713 if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
2714 if (waiting)
2715 waiter = null;
2716 return;
2717 }
2718 }
2719 else if ((s & WAITER) == 0) {
2720 if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
2721 waiting = true;
2722 waiter = Thread.currentThread();
2723 }
2724 }
2725 else if (waiting)
2726 LockSupport.park(this);
2727 }
2728 }
2729
2730 /**
2731 * Returns matching node or null if none. Tries to search
2732 * using tree comparisons from root, but continues linear
2733 * search when lock not available.
2734 */
2735 final Node<K,V> find(int h, Object k) {
2736 if (k != null) {
2737 for (Node<K,V> e = first; e != null; e = e.next) {
2738 int s; K ek;
2739 if (((s = lockState) & (WAITER|WRITER)) != 0) {
2740 if (e.hash == h &&
2741 ((ek = e.key) == k || (ek != null && k.equals(ek))))
2742 return e;
2743 }
2744 else if (U.compareAndSwapInt(this, LOCKSTATE, s,
2745 s + READER)) {
2746 TreeNode<K,V> r, p;
2747 try {
2748 p = ((r = root) == null ? null :
2749 r.findTreeNode(h, k, null));
2750 } finally {
2751 Thread w;
2752 int ls;
2753 do {} while (!U.compareAndSwapInt
2754 (this, LOCKSTATE,
2755 ls = lockState, ls - READER));
2756 if (ls == (READER|WAITER) && (w = waiter) != null)
2757 LockSupport.unpark(w);
2758 }
2759 return p;
2760 }
2761 }
2762 }
2763 return null;
2764 }
2765
2766 /**
2767 * Finds or adds a node.
2768 * @return null if added
2769 */
2770 final TreeNode<K,V> putTreeVal(int h, K k, V v) {
2771 Class<?> kc = null;
2772 boolean searched = false;
2773 for (TreeNode<K,V> p = root;;) {
2774 int dir, ph; K pk;
2775 if (p == null) {
2776 first = root = new TreeNode<K,V>(h, k, v, null, null);
2777 break;
2778 }
2779 else if ((ph = p.hash) > h)
2780 dir = -1;
2781 else if (ph < h)
2782 dir = 1;
2783 else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2784 return p;
2785 else if ((kc == null &&
2786 (kc = comparableClassFor(k)) == null) ||
2787 (dir = compareComparables(kc, k, pk)) == 0) {
2788 if (!searched) {
2789 TreeNode<K,V> q, ch;
2790 searched = true;
2791 if (((ch = p.left) != null &&
2792 (q = ch.findTreeNode(h, k, kc)) != null) ||
2793 ((ch = p.right) != null &&
2794 (q = ch.findTreeNode(h, k, kc)) != null))
2795 return q;
2796 }
2797 dir = tieBreakOrder(k, pk);
2798 }
2799
2800 TreeNode<K,V> xp = p;
2801 if ((p = (dir <= 0) ? p.left : p.right) == null) {
2802 TreeNode<K,V> x, f = first;
2803 first = x = new TreeNode<K,V>(h, k, v, f, xp);
2804 if (f != null)
2805 f.prev = x;
2806 if (dir <= 0)
2807 xp.left = x;
2808 else
2809 xp.right = x;
2810 if (!xp.red)
2811 x.red = true;
2812 else {
2813 lockRoot();
2814 try {
2815 root = balanceInsertion(root, x);
2816 } finally {
2817 unlockRoot();
2818 }
2819 }
2820 break;
2821 }
2822 }
2823 assert checkInvariants(root);
2824 return null;
2825 }
2826
2827 /**
2828 * Removes the given node, that must be present before this
2829 * call. This is messier than typical red-black deletion code
2830 * because we cannot swap the contents of an interior node
2831 * with a leaf successor that is pinned by "next" pointers
2832 * that are accessible independently of lock. So instead we
2833 * swap the tree linkages.
2834 *
2835 * @return true if now too small, so should be untreeified
2836 */
2837 final boolean removeTreeNode(TreeNode<K,V> p) {
2838 TreeNode<K,V> next = (TreeNode<K,V>)p.next;
2839 TreeNode<K,V> pred = p.prev; // unlink traversal pointers
2840 TreeNode<K,V> r, rl;
2841 if (pred == null)
2842 first = next;
2843 else
2844 pred.next = next;
2845 if (next != null)
2846 next.prev = pred;
2847 if (first == null) {
2848 root = null;
2849 return true;
2850 }
2851 if ((r = root) == null || r.right == null || // too small
2852 (rl = r.left) == null || rl.left == null)
2853 return true;
2854 lockRoot();
2855 try {
2856 TreeNode<K,V> replacement;
2857 TreeNode<K,V> pl = p.left;
2858 TreeNode<K,V> pr = p.right;
2859 if (pl != null && pr != null) {
2860 TreeNode<K,V> s = pr, sl;
2861 while ((sl = s.left) != null) // find successor
2862 s = sl;
2863 boolean c = s.red; s.red = p.red; p.red = c; // swap colors
2864 TreeNode<K,V> sr = s.right;
2865 TreeNode<K,V> pp = p.parent;
2866 if (s == pr) { // p was s's direct parent
2867 p.parent = s;
2868 s.right = p;
2869 }
2870 else {
2871 TreeNode<K,V> sp = s.parent;
2872 if ((p.parent = sp) != null) {
2873 if (s == sp.left)
2874 sp.left = p;
2875 else
2876 sp.right = p;
2877 }
2878 if ((s.right = pr) != null)
2879 pr.parent = s;
2880 }
2881 p.left = null;
2882 if ((p.right = sr) != null)
2883 sr.parent = p;
2884 if ((s.left = pl) != null)
2885 pl.parent = s;
2886 if ((s.parent = pp) == null)
2887 r = s;
2888 else if (p == pp.left)
2889 pp.left = s;
2890 else
2891 pp.right = s;
2892 if (sr != null)
2893 replacement = sr;
2894 else
2895 replacement = p;
2896 }
2897 else if (pl != null)
2898 replacement = pl;
2899 else if (pr != null)
2900 replacement = pr;
2901 else
2902 replacement = p;
2903 if (replacement != p) {
2904 TreeNode<K,V> pp = replacement.parent = p.parent;
2905 if (pp == null)
2906 r = replacement;
2907 else if (p == pp.left)
2908 pp.left = replacement;
2909 else
2910 pp.right = replacement;
2911 p.left = p.right = p.parent = null;
2912 }
2913
2914 root = (p.red) ? r : balanceDeletion(r, replacement);
2915
2916 if (p == replacement) { // detach pointers
2917 TreeNode<K,V> pp;
2918 if ((pp = p.parent) != null) {
2919 if (p == pp.left)
2920 pp.left = null;
2921 else if (p == pp.right)
2922 pp.right = null;
2923 p.parent = null;
2924 }
2925 }
2926 } finally {
2927 unlockRoot();
2928 }
2929 assert checkInvariants(root);
2930 return false;
2931 }
2932
2933 /* ------------------------------------------------------------ */
2934 // Red-black tree methods, all adapted from CLR
2935
2936 static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
2937 TreeNode<K,V> p) {
2938 TreeNode<K,V> r, pp, rl;
2939 if (p != null && (r = p.right) != null) {
2940 if ((rl = p.right = r.left) != null)
2941 rl.parent = p;
2942 if ((pp = r.parent = p.parent) == null)
2943 (root = r).red = false;
2944 else if (pp.left == p)
2945 pp.left = r;
2946 else
2947 pp.right = r;
2948 r.left = p;
2949 p.parent = r;
2950 }
2951 return root;
2952 }
2953
2954 static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
2955 TreeNode<K,V> p) {
2956 TreeNode<K,V> l, pp, lr;
2957 if (p != null && (l = p.left) != null) {
2958 if ((lr = p.left = l.right) != null)
2959 lr.parent = p;
2960 if ((pp = l.parent = p.parent) == null)
2961 (root = l).red = false;
2962 else if (pp.right == p)
2963 pp.right = l;
2964 else
2965 pp.left = l;
2966 l.right = p;
2967 p.parent = l;
2968 }
2969 return root;
2970 }
2971
2972 static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
2973 TreeNode<K,V> x) {
2974 x.red = true;
2975 for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
2976 if ((xp = x.parent) == null) {
2977 x.red = false;
2978 return x;
2979 }
2980 else if (!xp.red || (xpp = xp.parent) == null)
2981 return root;
2982 if (xp == (xppl = xpp.left)) {
2983 if ((xppr = xpp.right) != null && xppr.red) {
2984 xppr.red = false;
2985 xp.red = false;
2986 xpp.red = true;
2987 x = xpp;
2988 }
2989 else {
2990 if (x == xp.right) {
2991 root = rotateLeft(root, x = xp);
2992 xpp = (xp = x.parent) == null ? null : xp.parent;
2993 }
2994 if (xp != null) {
2995 xp.red = false;
2996 if (xpp != null) {
2997 xpp.red = true;
2998 root = rotateRight(root, xpp);
2999 }
3000 }
3001 }
3002 }
3003 else {
3004 if (xppl != null && xppl.red) {
3005 xppl.red = false;
3006 xp.red = false;
3007 xpp.red = true;
3008 x = xpp;
3009 }
3010 else {
3011 if (x == xp.left) {
3012 root = rotateRight(root, x = xp);
3013 xpp = (xp = x.parent) == null ? null : xp.parent;
3014 }
3015 if (xp != null) {
3016 xp.red = false;
3017 if (xpp != null) {
3018 xpp.red = true;
3019 root = rotateLeft(root, xpp);
3020 }
3021 }
3022 }
3023 }
3024 }
3025 }
3026
3027 static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
3028 TreeNode<K,V> x) {
3029 for (TreeNode<K,V> xp, xpl, xpr;;) {
3030 if (x == null || x == root)
3031 return root;
3032 else if ((xp = x.parent) == null) {
3033 x.red = false;
3034 return x;
3035 }
3036 else if (x.red) {
3037 x.red = false;
3038 return root;
3039 }
3040 else if ((xpl = xp.left) == x) {
3041 if ((xpr = xp.right) != null && xpr.red) {
3042 xpr.red = false;
3043 xp.red = true;
3044 root = rotateLeft(root, xp);
3045 xpr = (xp = x.parent) == null ? null : xp.right;
3046 }
3047 if (xpr == null)
3048 x = xp;
3049 else {
3050 TreeNode<K,V> sl = xpr.left, sr = xpr.right;
3051 if ((sr == null || !sr.red) &&
3052 (sl == null || !sl.red)) {
3053 xpr.red = true;
3054 x = xp;
3055 }
3056 else {
3057 if (sr == null || !sr.red) {
3058 if (sl != null)
3059 sl.red = false;
3060 xpr.red = true;
3061 root = rotateRight(root, xpr);
3062 xpr = (xp = x.parent) == null ?
3063 null : xp.right;
3064 }
3065 if (xpr != null) {
3066 xpr.red = (xp == null) ? false : xp.red;
3067 if ((sr = xpr.right) != null)
3068 sr.red = false;
3069 }
3070 if (xp != null) {
3071 xp.red = false;
3072 root = rotateLeft(root, xp);
3073 }
3074 x = root;
3075 }
3076 }
3077 }
3078 else { // symmetric
3079 if (xpl != null && xpl.red) {
3080 xpl.red = false;
3081 xp.red = true;
3082 root = rotateRight(root, xp);
3083 xpl = (xp = x.parent) == null ? null : xp.left;
3084 }
3085 if (xpl == null)
3086 x = xp;
3087 else {
3088 TreeNode<K,V> sl = xpl.left, sr = xpl.right;
3089 if ((sl == null || !sl.red) &&
3090 (sr == null || !sr.red)) {
3091 xpl.red = true;
3092 x = xp;
3093 }
3094 else {
3095 if (sl == null || !sl.red) {
3096 if (sr != null)
3097 sr.red = false;
3098 xpl.red = true;
3099 root = rotateLeft(root, xpl);
3100 xpl = (xp = x.parent) == null ?
3101 null : xp.left;
3102 }
3103 if (xpl != null) {
3104 xpl.red = (xp == null) ? false : xp.red;
3105 if ((sl = xpl.left) != null)
3106 sl.red = false;
3107 }
3108 if (xp != null) {
3109 xp.red = false;
3110 root = rotateRight(root, xp);
3111 }
3112 x = root;
3113 }
3114 }
3115 }
3116 }
3117 }
3118
3119 /**
3120 * Recursive invariant check
3121 */
3122 static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
3123 TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
3124 tb = t.prev, tn = (TreeNode<K,V>)t.next;
3125 if (tb != null && tb.next != t)
3126 return false;
3127 if (tn != null && tn.prev != t)
3128 return false;
3129 if (tp != null && t != tp.left && t != tp.right)
3130 return false;
3131 if (tl != null && (tl.parent != t || tl.hash > t.hash))
3132 return false;
3133 if (tr != null && (tr.parent != t || tr.hash < t.hash))
3134 return false;
3135 if (t.red && tl != null && tl.red && tr != null && tr.red)
3136 return false;
3137 if (tl != null && !checkInvariants(tl))
3138 return false;
3139 if (tr != null && !checkInvariants(tr))
3140 return false;
3141 return true;
3142 }
3143
3144 private static final sun.misc.Unsafe U;
3145 private static final long LOCKSTATE;
3146 static {
3147 try {
3148 U = getUnsafe();
3149 Class<?> k = TreeBin.class;
3150 LOCKSTATE = U.objectFieldOffset
3151 (k.getDeclaredField("lockState"));
3152 } catch (Exception e) {
3153 throw new Error(e);
3154 }
3155 }
3156 }
3157
3158 /* ----------------Table Traversal -------------- */
3159
3160 /**
3161 * Records the table, its length, and current traversal index for a
3162 * traverser that must process a region of a forwarded table before
3163 * proceeding with current table.
3164 */
    static final class TableStack<K,V> {
        int length;              // length of the saved table
        int index;               // traversal index to resume at in the saved table
        Node<K,V>[] tab;         // the saved (pre-resize) table
        TableStack<K,V> next;    // link to the next saved frame, if any
    }
3171
3172 /**
3173 * Encapsulates traversal for methods such as containsValue; also
3174 * serves as a base class for other iterators and spliterators.
3175 *
3176 * Method advance visits once each still-valid node that was
3177 * reachable upon iterator construction. It might miss some that
3178 * were added to a bin after the bin was visited, which is OK wrt
3179 * consistency guarantees. Maintaining this property in the face
3180 * of possible ongoing resizes requires a fair amount of
3181 * bookkeeping state that is difficult to optimize away amidst
3182 * volatile accesses. Even so, traversal maintains reasonable
3183 * throughput.
3184 *
3185 * Normally, iteration proceeds bin-by-bin traversing lists.
3186 * However, if the table has been resized, then all future steps
3187 * must traverse both the bin at the current index as well as at
3188 * (index + baseSize); and so on for further resizings. To
3189 * paranoically cope with potential sharing by users of iterators
3190 * across threads, iteration terminates if a bounds checks fails
3191 * for a table read.
3192 */
    static class Traverser<K,V> {
        Node<K,V>[] tab;        // current table; updated if resized
        Node<K,V> next;         // the next entry to use
        TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
        int index;              // index of bin to use next
        int baseIndex;          // current index of initial table
        int baseLimit;          // index bound for initial table
        final int baseSize;     // initial table size

        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
            this.tab = tab;
            this.baseSize = size;
            this.baseIndex = this.index = index;
            this.baseLimit = limit;
            this.next = null;
        }

        /**
         * Advances if possible, returning next valid node, or null if none.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next; // continue within the current bin's list
            for (;;) {
                Node<K,V>[] t; int i, n; // must use locals in checks
                if (e != null)
                    return next = e;
                // terminate on exhaustion, or defensively on a failed
                // bounds check (guards against cross-thread misuse)
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                if ((e = tabAt(t, i)) != null && e.hash < 0) {
                    // negative hash marks a special node
                    if (e instanceof ForwardingNode) {
                        // bin moved during resize: descend into the next
                        // table, saving current position to return to later
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        pushState(t, i, n);
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first; // iterate list, not tree
                    else
                        e = null; // other special node; nothing to traverse
                }
                if (stack != null)
                    recoverState(n);
                else if ((index = i + baseSize) >= n)
                    index = ++baseIndex; // visit upper slots if present
            }
        }

        /**
         * Saves traversal state upon encountering a forwarding node.
         */
        private void pushState(Node<K,V>[] t, int i, int n) {
            TableStack<K,V> s = spare; // reuse if possible
            if (s != null)
                spare = s.next;
            else
                s = new TableStack<K,V>();
            s.tab = t;
            s.length = n;
            s.index = i;
            s.next = stack;
            stack = s;
        }

        /**
         * Possibly pops traversal state.
         *
         * @param n length of current table
         */
        private void recoverState(int n) {
            TableStack<K,V> s; int len;
            // pop frames whose split region is finished, resuming in the
            // saved (smaller) table each time
            while ((s = stack) != null && (index += (len = s.length)) >= n) {
                n = len;
                index = s.index;
                tab = s.tab;
                s.tab = null;
                TableStack<K,V> next = s.next;
                s.next = spare; // save for reuse
                stack = next;
                spare = s;
            }
            if (s == null && (index += baseSize) >= n)
                index = ++baseIndex;
        }
    }
3280
3281 /**
3282 * Base of key, value, and entry Iterators. Adds fields to
3283 * Traverser to support iterator.remove.
3284 */
3285 static class BaseIterator<K,V> extends Traverser<K,V> {
3286 final ConcurrentHashMapV8<K,V> map;
3287 Node<K,V> lastReturned;
3288 BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
3289 ConcurrentHashMapV8<K,V> map) {
3290 super(tab, size, index, limit);
3291 this.map = map;
3292 advance();
3293 }
3294
3295 public final boolean hasNext() { return next != null; }
3296 public final boolean hasMoreElements() { return next != null; }
3297
3298 public final void remove() {
3299 Node<K,V> p;
3300 if ((p = lastReturned) == null)
3301 throw new IllegalStateException();
3302 lastReturned = null;
3303 map.replaceNode(p.key, null, null);
3304 }
3305 }
3306
3307 static final class KeyIterator<K,V> extends BaseIterator<K,V>
3308 implements Iterator<K>, Enumeration<K> {
3309 KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
3310 ConcurrentHashMapV8<K,V> map) {
3311 super(tab, index, size, limit, map);
3312 }
3313
3314 public final K next() {
3315 Node<K,V> p;
3316 if ((p = next) == null)
3317 throw new NoSuchElementException();
3318 K k = p.key;
3319 lastReturned = p;
3320 advance();
3321 return k;
3322 }
3323
3324 public final K nextElement() { return next(); }
3325 }
3326
3327 static final class ValueIterator<K,V> extends BaseIterator<K,V>
3328 implements Iterator<V>, Enumeration<V> {
3329 ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
3330 ConcurrentHashMapV8<K,V> map) {
3331 super(tab, index, size, limit, map);
3332 }
3333
3334 public final V next() {
3335 Node<K,V> p;
3336 if ((p = next) == null)
3337 throw new NoSuchElementException();
3338 V v = p.val;
3339 lastReturned = p;
3340 advance();
3341 return v;
3342 }
3343
3344 public final V nextElement() { return next(); }
3345 }
3346
3347 static final class EntryIterator<K,V> extends BaseIterator<K,V>
3348 implements Iterator<Map.Entry<K,V>> {
3349 EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
3350 ConcurrentHashMapV8<K,V> map) {
3351 super(tab, index, size, limit, map);
3352 }
3353
3354 public final Map.Entry<K,V> next() {
3355 Node<K,V> p;
3356 if ((p = next) == null)
3357 throw new NoSuchElementException();
3358 K k = p.key;
3359 V v = p.val;
3360 lastReturned = p;
3361 advance();
3362 return new MapEntry<K,V>(k, v, map);
3363 }
3364 }
3365
3366 /**
3367 * Exported Entry for EntryIterator
3368 */
3369 static final class MapEntry<K,V> implements Map.Entry<K,V> {
3370 final K key; // non-null
3371 V val; // non-null
3372 final ConcurrentHashMapV8<K,V> map;
3373 MapEntry(K key, V val, ConcurrentHashMapV8<K,V> map) {
3374 this.key = key;
3375 this.val = val;
3376 this.map = map;
3377 }
3378 public K getKey() { return key; }
3379 public V getValue() { return val; }
3380 public int hashCode() { return key.hashCode() ^ val.hashCode(); }
3381 public String toString() { return key + "=" + val; }
3382
3383 public boolean equals(Object o) {
3384 Object k, v; Map.Entry<?,?> e;
3385 return ((o instanceof Map.Entry) &&
3386 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3387 (v = e.getValue()) != null &&
3388 (k == key || k.equals(key)) &&
3389 (v == val || v.equals(val)));
3390 }
3391
3392 /**
3393 * Sets our entry's value and writes through to the map. The
3394 * value to return is somewhat arbitrary here. Since we do not
3395 * necessarily track asynchronous changes, the most recent
3396 * "previous" value could be different from what we return (or
3397 * could even have been removed, in which case the put will
3398 * re-establish). We do not and cannot guarantee more.
3399 */
3400 public V setValue(V value) {
3401 if (value == null) throw new NullPointerException();
3402 V v = val;
3403 val = value;
3404 map.put(key, value);
3405 return v;
3406 }
3407 }
3408
3409 static final class KeySpliterator<K,V> extends Traverser<K,V>
3410 implements ConcurrentHashMapSpliterator<K> {
3411 long est; // size estimate
3412 KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3413 long est) {
3414 super(tab, size, index, limit);
3415 this.est = est;
3416 }
3417
3418 public ConcurrentHashMapSpliterator<K> trySplit() {
3419 int i, f, h;
3420 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3421 new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
3422 f, est >>>= 1);
3423 }
3424
3425 public void forEachRemaining(Action<? super K> action) {
3426 if (action == null) throw new NullPointerException();
3427 for (Node<K,V> p; (p = advance()) != null;)
3428 action.apply(p.key);
3429 }
3430
3431 public boolean tryAdvance(Action<? super K> action) {
3432 if (action == null) throw new NullPointerException();
3433 Node<K,V> p;
3434 if ((p = advance()) == null)
3435 return false;
3436 action.apply(p.key);
3437 return true;
3438 }
3439
3440 public long estimateSize() { return est; }
3441
3442 }
3443
3444 static final class ValueSpliterator<K,V> extends Traverser<K,V>
3445 implements ConcurrentHashMapSpliterator<V> {
3446 long est; // size estimate
3447 ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
3448 long est) {
3449 super(tab, size, index, limit);
3450 this.est = est;
3451 }
3452
3453 public ConcurrentHashMapSpliterator<V> trySplit() {
3454 int i, f, h;
3455 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3456 new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
3457 f, est >>>= 1);
3458 }
3459
3460 public void forEachRemaining(Action<? super V> action) {
3461 if (action == null) throw new NullPointerException();
3462 for (Node<K,V> p; (p = advance()) != null;)
3463 action.apply(p.val);
3464 }
3465
3466 public boolean tryAdvance(Action<? super V> action) {
3467 if (action == null) throw new NullPointerException();
3468 Node<K,V> p;
3469 if ((p = advance()) == null)
3470 return false;
3471 action.apply(p.val);
3472 return true;
3473 }
3474
3475 public long estimateSize() { return est; }
3476
3477 }
3478
3479 static final class EntrySpliterator<K,V> extends Traverser<K,V>
3480 implements ConcurrentHashMapSpliterator<Map.Entry<K,V>> {
3481 final ConcurrentHashMapV8<K,V> map; // To export MapEntry
3482 long est; // size estimate
3483 EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3484 long est, ConcurrentHashMapV8<K,V> map) {
3485 super(tab, size, index, limit);
3486 this.map = map;
3487 this.est = est;
3488 }
3489
3490 public ConcurrentHashMapSpliterator<Map.Entry<K,V>> trySplit() {
3491 int i, f, h;
3492 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3493 new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
3494 f, est >>>= 1, map);
3495 }
3496
3497 public void forEachRemaining(Action<? super Map.Entry<K,V>> action) {
3498 if (action == null) throw new NullPointerException();
3499 for (Node<K,V> p; (p = advance()) != null; )
3500 action.apply(new MapEntry<K,V>(p.key, p.val, map));
3501 }
3502
3503 public boolean tryAdvance(Action<? super Map.Entry<K,V>> action) {
3504 if (action == null) throw new NullPointerException();
3505 Node<K,V> p;
3506 if ((p = advance()) == null)
3507 return false;
3508 action.apply(new MapEntry<K,V>(p.key, p.val, map));
3509 return true;
3510 }
3511
3512 public long estimateSize() { return est; }
3513
3514 }
3515
3516 // Parallel bulk operations
3517
3518 /**
3519 * Computes initial batch value for bulk tasks. The returned value
3520 * is approximately exp2 of the number of times (minus one) to
3521 * split task by two before executing leaf action. This value is
3522 * faster to compute and more convenient to use as a guide to
3523 * splitting than is the depth, since it is used while dividing by
3524 * two anyway.
3525 */
3526 final int batchFor(long b) {
3527 long n;
3528 if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3529 return 0;
3530 int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3531 return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3532 }
3533
3534 /**
3535 * Performs the given action for each (key, value).
3536 *
3537 * @param parallelismThreshold the (estimated) number of elements
3538 * needed for this operation to be executed in parallel
3539 * @param action the action
3540 * @since 1.8
3541 */
3542 public void forEach(long parallelismThreshold,
3543 BiAction<? super K,? super V> action) {
3544 if (action == null) throw new NullPointerException();
3545 new ForEachMappingTask<K,V>
3546 (null, batchFor(parallelismThreshold), 0, 0, table,
3547 action).invoke();
3548 }
3549
3550 /**
3551 * Performs the given action for each non-null transformation
3552 * of each (key, value).
3553 *
3554 * @param parallelismThreshold the (estimated) number of elements
3555 * needed for this operation to be executed in parallel
3556 * @param transformer a function returning the transformation
3557 * for an element, or null if there is no transformation (in
3558 * which case the action is not applied)
3559 * @param action the action
3560 * @since 1.8
3561 */
3562 public <U> void forEach(long parallelismThreshold,
3563 BiFun<? super K, ? super V, ? extends U> transformer,
3564 Action<? super U> action) {
3565 if (transformer == null || action == null)
3566 throw new NullPointerException();
3567 new ForEachTransformedMappingTask<K,V,U>
3568 (null, batchFor(parallelismThreshold), 0, 0, table,
3569 transformer, action).invoke();
3570 }
3571
3572 /**
3573 * Returns a non-null result from applying the given search
3574 * function on each (key, value), or null if none. Upon
3575 * success, further element processing is suppressed and the
3576 * results of any other parallel invocations of the search
3577 * function are ignored.
3578 *
3579 * @param parallelismThreshold the (estimated) number of elements
3580 * needed for this operation to be executed in parallel
3581 * @param searchFunction a function returning a non-null
3582 * result on success, else null
3583 * @return a non-null result from applying the given search
3584 * function on each (key, value), or null if none
3585 * @since 1.8
3586 */
3587 public <U> U search(long parallelismThreshold,
3588 BiFun<? super K, ? super V, ? extends U> searchFunction) {
3589 if (searchFunction == null) throw new NullPointerException();
3590 return new SearchMappingsTask<K,V,U>
3591 (null, batchFor(parallelismThreshold), 0, 0, table,
3592 searchFunction, new AtomicReference<U>()).invoke();
3593 }
3594
3595 /**
3596 * Returns the result of accumulating the given transformation
3597 * of all (key, value) pairs using the given reducer to
3598 * combine values, or null if none.
3599 *
3600 * @param parallelismThreshold the (estimated) number of elements
3601 * needed for this operation to be executed in parallel
3602 * @param transformer a function returning the transformation
3603 * for an element, or null if there is no transformation (in
3604 * which case it is not combined)
3605 * @param reducer a commutative associative combining function
3606 * @return the result of accumulating the given transformation
3607 * of all (key, value) pairs
3608 * @since 1.8
3609 */
3610 public <U> U reduce(long parallelismThreshold,
3611 BiFun<? super K, ? super V, ? extends U> transformer,
3612 BiFun<? super U, ? super U, ? extends U> reducer) {
3613 if (transformer == null || reducer == null)
3614 throw new NullPointerException();
3615 return new MapReduceMappingsTask<K,V,U>
3616 (null, batchFor(parallelismThreshold), 0, 0, table,
3617 null, transformer, reducer).invoke();
3618 }
3619
3620 /**
3621 * Returns the result of accumulating the given transformation
3622 * of all (key, value) pairs using the given reducer to
3623 * combine values, and the given basis as an identity value.
3624 *
3625 * @param parallelismThreshold the (estimated) number of elements
3626 * needed for this operation to be executed in parallel
3627 * @param transformer a function returning the transformation
3628 * for an element
3629 * @param basis the identity (initial default value) for the reduction
3630 * @param reducer a commutative associative combining function
3631 * @return the result of accumulating the given transformation
3632 * of all (key, value) pairs
3633 * @since 1.8
3634 */
3635 public double reduceToDouble(long parallelismThreshold,
3636 ObjectByObjectToDouble<? super K, ? super V> transformer,
3637 double basis,
3638 DoubleByDoubleToDouble reducer) {
3639 if (transformer == null || reducer == null)
3640 throw new NullPointerException();
3641 return new MapReduceMappingsToDoubleTask<K,V>
3642 (null, batchFor(parallelismThreshold), 0, 0, table,
3643 null, transformer, basis, reducer).invoke();
3644 }
3645
3646 /**
3647 * Returns the result of accumulating the given transformation
3648 * of all (key, value) pairs using the given reducer to
3649 * combine values, and the given basis as an identity value.
3650 *
3651 * @param parallelismThreshold the (estimated) number of elements
3652 * needed for this operation to be executed in parallel
3653 * @param transformer a function returning the transformation
3654 * for an element
3655 * @param basis the identity (initial default value) for the reduction
3656 * @param reducer a commutative associative combining function
3657 * @return the result of accumulating the given transformation
3658 * of all (key, value) pairs
3659 * @since 1.8
3660 */
3661 public long reduceToLong(long parallelismThreshold,
3662 ObjectByObjectToLong<? super K, ? super V> transformer,
3663 long basis,
3664 LongByLongToLong reducer) {
3665 if (transformer == null || reducer == null)
3666 throw new NullPointerException();
3667 return new MapReduceMappingsToLongTask<K,V>
3668 (null, batchFor(parallelismThreshold), 0, 0, table,
3669 null, transformer, basis, reducer).invoke();
3670 }
3671
3672 /**
3673 * Returns the result of accumulating the given transformation
3674 * of all (key, value) pairs using the given reducer to
3675 * combine values, and the given basis as an identity value.
3676 *
3677 * @param parallelismThreshold the (estimated) number of elements
3678 * needed for this operation to be executed in parallel
3679 * @param transformer a function returning the transformation
3680 * for an element
3681 * @param basis the identity (initial default value) for the reduction
3682 * @param reducer a commutative associative combining function
3683 * @return the result of accumulating the given transformation
3684 * of all (key, value) pairs
3685 * @since 1.8
3686 */
3687 public int reduceToInt(long parallelismThreshold,
3688 ObjectByObjectToInt<? super K, ? super V> transformer,
3689 int basis,
3690 IntByIntToInt reducer) {
3691 if (transformer == null || reducer == null)
3692 throw new NullPointerException();
3693 return new MapReduceMappingsToIntTask<K,V>
3694 (null, batchFor(parallelismThreshold), 0, 0, table,
3695 null, transformer, basis, reducer).invoke();
3696 }
3697
3698 /**
3699 * Performs the given action for each key.
3700 *
3701 * @param parallelismThreshold the (estimated) number of elements
3702 * needed for this operation to be executed in parallel
3703 * @param action the action
3704 * @since 1.8
3705 */
3706 public void forEachKey(long parallelismThreshold,
3707 Action<? super K> action) {
3708 if (action == null) throw new NullPointerException();
3709 new ForEachKeyTask<K,V>
3710 (null, batchFor(parallelismThreshold), 0, 0, table,
3711 action).invoke();
3712 }
3713
3714 /**
3715 * Performs the given action for each non-null transformation
3716 * of each key.
3717 *
3718 * @param parallelismThreshold the (estimated) number of elements
3719 * needed for this operation to be executed in parallel
3720 * @param transformer a function returning the transformation
3721 * for an element, or null if there is no transformation (in
3722 * which case the action is not applied)
3723 * @param action the action
3724 * @since 1.8
3725 */
3726 public <U> void forEachKey(long parallelismThreshold,
3727 Fun<? super K, ? extends U> transformer,
3728 Action<? super U> action) {
3729 if (transformer == null || action == null)
3730 throw new NullPointerException();
3731 new ForEachTransformedKeyTask<K,V,U>
3732 (null, batchFor(parallelismThreshold), 0, 0, table,
3733 transformer, action).invoke();
3734 }
3735
3736 /**
3737 * Returns a non-null result from applying the given search
3738 * function on each key, or null if none. Upon success,
3739 * further element processing is suppressed and the results of
3740 * any other parallel invocations of the search function are
3741 * ignored.
3742 *
3743 * @param parallelismThreshold the (estimated) number of elements
3744 * needed for this operation to be executed in parallel
3745 * @param searchFunction a function returning a non-null
3746 * result on success, else null
3747 * @return a non-null result from applying the given search
3748 * function on each key, or null if none
3749 * @since 1.8
3750 */
3751 public <U> U searchKeys(long parallelismThreshold,
3752 Fun<? super K, ? extends U> searchFunction) {
3753 if (searchFunction == null) throw new NullPointerException();
3754 return new SearchKeysTask<K,V,U>
3755 (null, batchFor(parallelismThreshold), 0, 0, table,
3756 searchFunction, new AtomicReference<U>()).invoke();
3757 }
3758
3759 /**
3760 * Returns the result of accumulating all keys using the given
3761 * reducer to combine values, or null if none.
3762 *
3763 * @param parallelismThreshold the (estimated) number of elements
3764 * needed for this operation to be executed in parallel
3765 * @param reducer a commutative associative combining function
3766 * @return the result of accumulating all keys using the given
3767 * reducer to combine values, or null if none
3768 * @since 1.8
3769 */
3770 public K reduceKeys(long parallelismThreshold,
3771 BiFun<? super K, ? super K, ? extends K> reducer) {
3772 if (reducer == null) throw new NullPointerException();
3773 return new ReduceKeysTask<K,V>
3774 (null, batchFor(parallelismThreshold), 0, 0, table,
3775 null, reducer).invoke();
3776 }
3777
3778 /**
3779 * Returns the result of accumulating the given transformation
3780 * of all keys using the given reducer to combine values, or
3781 * null if none.
3782 *
3783 * @param parallelismThreshold the (estimated) number of elements
3784 * needed for this operation to be executed in parallel
3785 * @param transformer a function returning the transformation
3786 * for an element, or null if there is no transformation (in
3787 * which case it is not combined)
3788 * @param reducer a commutative associative combining function
3789 * @return the result of accumulating the given transformation
3790 * of all keys
3791 * @since 1.8
3792 */
3793 public <U> U reduceKeys(long parallelismThreshold,
3794 Fun<? super K, ? extends U> transformer,
3795 BiFun<? super U, ? super U, ? extends U> reducer) {
3796 if (transformer == null || reducer == null)
3797 throw new NullPointerException();
3798 return new MapReduceKeysTask<K,V,U>
3799 (null, batchFor(parallelismThreshold), 0, 0, table,
3800 null, transformer, reducer).invoke();
3801 }
3802
3803 /**
3804 * Returns the result of accumulating the given transformation
3805 * of all keys using the given reducer to combine values, and
3806 * the given basis as an identity value.
3807 *
3808 * @param parallelismThreshold the (estimated) number of elements
3809 * needed for this operation to be executed in parallel
3810 * @param transformer a function returning the transformation
3811 * for an element
3812 * @param basis the identity (initial default value) for the reduction
3813 * @param reducer a commutative associative combining function
3814 * @return the result of accumulating the given transformation
3815 * of all keys
3816 * @since 1.8
3817 */
3818 public double reduceKeysToDouble(long parallelismThreshold,
3819 ObjectToDouble<? super K> transformer,
3820 double basis,
3821 DoubleByDoubleToDouble reducer) {
3822 if (transformer == null || reducer == null)
3823 throw new NullPointerException();
3824 return new MapReduceKeysToDoubleTask<K,V>
3825 (null, batchFor(parallelismThreshold), 0, 0, table,
3826 null, transformer, basis, reducer).invoke();
3827 }
3828
3829 /**
3830 * Returns the result of accumulating the given transformation
3831 * of all keys using the given reducer to combine values, and
3832 * the given basis as an identity value.
3833 *
3834 * @param parallelismThreshold the (estimated) number of elements
3835 * needed for this operation to be executed in parallel
3836 * @param transformer a function returning the transformation
3837 * for an element
3838 * @param basis the identity (initial default value) for the reduction
3839 * @param reducer a commutative associative combining function
3840 * @return the result of accumulating the given transformation
3841 * of all keys
3842 * @since 1.8
3843 */
3844 public long reduceKeysToLong(long parallelismThreshold,
3845 ObjectToLong<? super K> transformer,
3846 long basis,
3847 LongByLongToLong reducer) {
3848 if (transformer == null || reducer == null)
3849 throw new NullPointerException();
3850 return new MapReduceKeysToLongTask<K,V>
3851 (null, batchFor(parallelismThreshold), 0, 0, table,
3852 null, transformer, basis, reducer).invoke();
3853 }
3854
3855 /**
3856 * Returns the result of accumulating the given transformation
3857 * of all keys using the given reducer to combine values, and
3858 * the given basis as an identity value.
3859 *
3860 * @param parallelismThreshold the (estimated) number of elements
3861 * needed for this operation to be executed in parallel
3862 * @param transformer a function returning the transformation
3863 * for an element
3864 * @param basis the identity (initial default value) for the reduction
3865 * @param reducer a commutative associative combining function
3866 * @return the result of accumulating the given transformation
3867 * of all keys
3868 * @since 1.8
3869 */
3870 public int reduceKeysToInt(long parallelismThreshold,
3871 ObjectToInt<? super K> transformer,
3872 int basis,
3873 IntByIntToInt reducer) {
3874 if (transformer == null || reducer == null)
3875 throw new NullPointerException();
3876 return new MapReduceKeysToIntTask<K,V>
3877 (null, batchFor(parallelismThreshold), 0, 0, table,
3878 null, transformer, basis, reducer).invoke();
3879 }
3880
3881 /**
3882 * Performs the given action for each value.
3883 *
3884 * @param parallelismThreshold the (estimated) number of elements
3885 * needed for this operation to be executed in parallel
3886 * @param action the action
3887 * @since 1.8
3888 */
3889 public void forEachValue(long parallelismThreshold,
3890 Action<? super V> action) {
3891 if (action == null)
3892 throw new NullPointerException();
3893 new ForEachValueTask<K,V>
3894 (null, batchFor(parallelismThreshold), 0, 0, table,
3895 action).invoke();
3896 }
3897
3898 /**
3899 * Performs the given action for each non-null transformation
3900 * of each value.
3901 *
3902 * @param parallelismThreshold the (estimated) number of elements
3903 * needed for this operation to be executed in parallel
3904 * @param transformer a function returning the transformation
3905 * for an element, or null if there is no transformation (in
3906 * which case the action is not applied)
3907 * @param action the action
3908 * @since 1.8
3909 */
3910 public <U> void forEachValue(long parallelismThreshold,
3911 Fun<? super V, ? extends U> transformer,
3912 Action<? super U> action) {
3913 if (transformer == null || action == null)
3914 throw new NullPointerException();
3915 new ForEachTransformedValueTask<K,V,U>
3916 (null, batchFor(parallelismThreshold), 0, 0, table,
3917 transformer, action).invoke();
3918 }
3919
3920 /**
3921 * Returns a non-null result from applying the given search
3922 * function on each value, or null if none. Upon success,
3923 * further element processing is suppressed and the results of
3924 * any other parallel invocations of the search function are
3925 * ignored.
3926 *
3927 * @param parallelismThreshold the (estimated) number of elements
3928 * needed for this operation to be executed in parallel
3929 * @param searchFunction a function returning a non-null
3930 * result on success, else null
3931 * @return a non-null result from applying the given search
3932 * function on each value, or null if none
3933 * @since 1.8
3934 */
3935 public <U> U searchValues(long parallelismThreshold,
3936 Fun<? super V, ? extends U> searchFunction) {
3937 if (searchFunction == null) throw new NullPointerException();
3938 return new SearchValuesTask<K,V,U>
3939 (null, batchFor(parallelismThreshold), 0, 0, table,
3940 searchFunction, new AtomicReference<U>()).invoke();
3941 }
3942
3943 /**
3944 * Returns the result of accumulating all values using the
3945 * given reducer to combine values, or null if none.
3946 *
3947 * @param parallelismThreshold the (estimated) number of elements
3948 * needed for this operation to be executed in parallel
3949 * @param reducer a commutative associative combining function
3950 * @return the result of accumulating all values
3951 * @since 1.8
3952 */
3953 public V reduceValues(long parallelismThreshold,
3954 BiFun<? super V, ? super V, ? extends V> reducer) {
3955 if (reducer == null) throw new NullPointerException();
3956 return new ReduceValuesTask<K,V>
3957 (null, batchFor(parallelismThreshold), 0, 0, table,
3958 null, reducer).invoke();
3959 }
3960
3961 /**
3962 * Returns the result of accumulating the given transformation
3963 * of all values using the given reducer to combine values, or
3964 * null if none.
3965 *
3966 * @param parallelismThreshold the (estimated) number of elements
3967 * needed for this operation to be executed in parallel
3968 * @param transformer a function returning the transformation
3969 * for an element, or null if there is no transformation (in
3970 * which case it is not combined)
3971 * @param reducer a commutative associative combining function
3972 * @return the result of accumulating the given transformation
3973 * of all values
3974 * @since 1.8
3975 */
3976 public <U> U reduceValues(long parallelismThreshold,
3977 Fun<? super V, ? extends U> transformer,
3978 BiFun<? super U, ? super U, ? extends U> reducer) {
3979 if (transformer == null || reducer == null)
3980 throw new NullPointerException();
3981 return new MapReduceValuesTask<K,V,U>
3982 (null, batchFor(parallelismThreshold), 0, 0, table,
3983 null, transformer, reducer).invoke();
3984 }
3985
3986 /**
3987 * Returns the result of accumulating the given transformation
3988 * of all values using the given reducer to combine values,
3989 * and the given basis as an identity value.
3990 *
3991 * @param parallelismThreshold the (estimated) number of elements
3992 * needed for this operation to be executed in parallel
3993 * @param transformer a function returning the transformation
3994 * for an element
3995 * @param basis the identity (initial default value) for the reduction
3996 * @param reducer a commutative associative combining function
3997 * @return the result of accumulating the given transformation
3998 * of all values
3999 * @since 1.8
4000 */
4001 public double reduceValuesToDouble(long parallelismThreshold,
4002 ObjectToDouble<? super V> transformer,
4003 double basis,
4004 DoubleByDoubleToDouble reducer) {
4005 if (transformer == null || reducer == null)
4006 throw new NullPointerException();
4007 return new MapReduceValuesToDoubleTask<K,V>
4008 (null, batchFor(parallelismThreshold), 0, 0, table,
4009 null, transformer, basis, reducer).invoke();
4010 }
4011
4012 /**
4013 * Returns the result of accumulating the given transformation
4014 * of all values using the given reducer to combine values,
4015 * and the given basis as an identity value.
4016 *
4017 * @param parallelismThreshold the (estimated) number of elements
4018 * needed for this operation to be executed in parallel
4019 * @param transformer a function returning the transformation
4020 * for an element
4021 * @param basis the identity (initial default value) for the reduction
4022 * @param reducer a commutative associative combining function
4023 * @return the result of accumulating the given transformation
4024 * of all values
4025 * @since 1.8
4026 */
4027 public long reduceValuesToLong(long parallelismThreshold,
4028 ObjectToLong<? super V> transformer,
4029 long basis,
4030 LongByLongToLong reducer) {
4031 if (transformer == null || reducer == null)
4032 throw new NullPointerException();
4033 return new MapReduceValuesToLongTask<K,V>
4034 (null, batchFor(parallelismThreshold), 0, 0, table,
4035 null, transformer, basis, reducer).invoke();
4036 }
4037
4038 /**
4039 * Returns the result of accumulating the given transformation
4040 * of all values using the given reducer to combine values,
4041 * and the given basis as an identity value.
4042 *
4043 * @param parallelismThreshold the (estimated) number of elements
4044 * needed for this operation to be executed in parallel
4045 * @param transformer a function returning the transformation
4046 * for an element
4047 * @param basis the identity (initial default value) for the reduction
4048 * @param reducer a commutative associative combining function
4049 * @return the result of accumulating the given transformation
4050 * of all values
4051 * @since 1.8
4052 */
4053 public int reduceValuesToInt(long parallelismThreshold,
4054 ObjectToInt<? super V> transformer,
4055 int basis,
4056 IntByIntToInt reducer) {
4057 if (transformer == null || reducer == null)
4058 throw new NullPointerException();
4059 return new MapReduceValuesToIntTask<K,V>
4060 (null, batchFor(parallelismThreshold), 0, 0, table,
4061 null, transformer, basis, reducer).invoke();
4062 }
4063
4064 /**
4065 * Performs the given action for each entry.
4066 *
4067 * @param parallelismThreshold the (estimated) number of elements
4068 * needed for this operation to be executed in parallel
4069 * @param action the action
4070 * @since 1.8
4071 */
4072 public void forEachEntry(long parallelismThreshold,
4073 Action<? super Map.Entry<K,V>> action) {
4074 if (action == null) throw new NullPointerException();
4075 new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
4076 action).invoke();
4077 }
4078
4079 /**
4080 * Performs the given action for each non-null transformation
4081 * of each entry.
4082 *
4083 * @param parallelismThreshold the (estimated) number of elements
4084 * needed for this operation to be executed in parallel
4085 * @param transformer a function returning the transformation
4086 * for an element, or null if there is no transformation (in
4087 * which case the action is not applied)
4088 * @param action the action
4089 * @since 1.8
4090 */
4091 public <U> void forEachEntry(long parallelismThreshold,
4092 Fun<Map.Entry<K,V>, ? extends U> transformer,
4093 Action<? super U> action) {
4094 if (transformer == null || action == null)
4095 throw new NullPointerException();
4096 new ForEachTransformedEntryTask<K,V,U>
4097 (null, batchFor(parallelismThreshold), 0, 0, table,
4098 transformer, action).invoke();
4099 }
4100
4101 /**
4102 * Returns a non-null result from applying the given search
4103 * function on each entry, or null if none. Upon success,
4104 * further element processing is suppressed and the results of
4105 * any other parallel invocations of the search function are
4106 * ignored.
4107 *
4108 * @param parallelismThreshold the (estimated) number of elements
4109 * needed for this operation to be executed in parallel
4110 * @param searchFunction a function returning a non-null
4111 * result on success, else null
4112 * @return a non-null result from applying the given search
4113 * function on each entry, or null if none
4114 * @since 1.8
4115 */
4116 public <U> U searchEntries(long parallelismThreshold,
4117 Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
4118 if (searchFunction == null) throw new NullPointerException();
4119 return new SearchEntriesTask<K,V,U>
4120 (null, batchFor(parallelismThreshold), 0, 0, table,
4121 searchFunction, new AtomicReference<U>()).invoke();
4122 }
4123
4124 /**
4125 * Returns the result of accumulating all entries using the
4126 * given reducer to combine values, or null if none.
4127 *
4128 * @param parallelismThreshold the (estimated) number of elements
4129 * needed for this operation to be executed in parallel
4130 * @param reducer a commutative associative combining function
4131 * @return the result of accumulating all entries
4132 * @since 1.8
4133 */
4134 public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4135 BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4136 if (reducer == null) throw new NullPointerException();
4137 return new ReduceEntriesTask<K,V>
4138 (null, batchFor(parallelismThreshold), 0, 0, table,
4139 null, reducer).invoke();
4140 }
4141
4142 /**
4143 * Returns the result of accumulating the given transformation
4144 * of all entries using the given reducer to combine values,
4145 * or null if none.
4146 *
4147 * @param parallelismThreshold the (estimated) number of elements
4148 * needed for this operation to be executed in parallel
4149 * @param transformer a function returning the transformation
4150 * for an element, or null if there is no transformation (in
4151 * which case it is not combined)
4152 * @param reducer a commutative associative combining function
4153 * @return the result of accumulating the given transformation
4154 * of all entries
4155 * @since 1.8
4156 */
4157 public <U> U reduceEntries(long parallelismThreshold,
4158 Fun<Map.Entry<K,V>, ? extends U> transformer,
4159 BiFun<? super U, ? super U, ? extends U> reducer) {
4160 if (transformer == null || reducer == null)
4161 throw new NullPointerException();
4162 return new MapReduceEntriesTask<K,V,U>
4163 (null, batchFor(parallelismThreshold), 0, 0, table,
4164 null, transformer, reducer).invoke();
4165 }
4166
4167 /**
4168 * Returns the result of accumulating the given transformation
4169 * of all entries using the given reducer to combine values,
4170 * and the given basis as an identity value.
4171 *
4172 * @param parallelismThreshold the (estimated) number of elements
4173 * needed for this operation to be executed in parallel
4174 * @param transformer a function returning the transformation
4175 * for an element
4176 * @param basis the identity (initial default value) for the reduction
4177 * @param reducer a commutative associative combining function
4178 * @return the result of accumulating the given transformation
4179 * of all entries
4180 * @since 1.8
4181 */
4182 public double reduceEntriesToDouble(long parallelismThreshold,
4183 ObjectToDouble<Map.Entry<K,V>> transformer,
4184 double basis,
4185 DoubleByDoubleToDouble reducer) {
4186 if (transformer == null || reducer == null)
4187 throw new NullPointerException();
4188 return new MapReduceEntriesToDoubleTask<K,V>
4189 (null, batchFor(parallelismThreshold), 0, 0, table,
4190 null, transformer, basis, reducer).invoke();
4191 }
4192
4193 /**
4194 * Returns the result of accumulating the given transformation
4195 * of all entries using the given reducer to combine values,
4196 * and the given basis as an identity value.
4197 *
4198 * @param parallelismThreshold the (estimated) number of elements
4199 * needed for this operation to be executed in parallel
4200 * @param transformer a function returning the transformation
4201 * for an element
4202 * @param basis the identity (initial default value) for the reduction
4203 * @param reducer a commutative associative combining function
4204 * @return the result of accumulating the given transformation
4205 * of all entries
4206 * @since 1.8
4207 */
4208 public long reduceEntriesToLong(long parallelismThreshold,
4209 ObjectToLong<Map.Entry<K,V>> transformer,
4210 long basis,
4211 LongByLongToLong reducer) {
4212 if (transformer == null || reducer == null)
4213 throw new NullPointerException();
4214 return new MapReduceEntriesToLongTask<K,V>
4215 (null, batchFor(parallelismThreshold), 0, 0, table,
4216 null, transformer, basis, reducer).invoke();
4217 }
4218
4219 /**
4220 * Returns the result of accumulating the given transformation
4221 * of all entries using the given reducer to combine values,
4222 * and the given basis as an identity value.
4223 *
4224 * @param parallelismThreshold the (estimated) number of elements
4225 * needed for this operation to be executed in parallel
4226 * @param transformer a function returning the transformation
4227 * for an element
4228 * @param basis the identity (initial default value) for the reduction
4229 * @param reducer a commutative associative combining function
4230 * @return the result of accumulating the given transformation
4231 * of all entries
4232 * @since 1.8
4233 */
4234 public int reduceEntriesToInt(long parallelismThreshold,
4235 ObjectToInt<Map.Entry<K,V>> transformer,
4236 int basis,
4237 IntByIntToInt reducer) {
4238 if (transformer == null || reducer == null)
4239 throw new NullPointerException();
4240 return new MapReduceEntriesToIntTask<K,V>
4241 (null, batchFor(parallelismThreshold), 0, 0, table,
4242 null, transformer, basis, reducer).invoke();
4243 }
4244
4245
4246 /* ----------------Views -------------- */
4247
4248 /**
4249 * Base class for views.
4250 */
    abstract static class CollectionView<K,V,E>
        implements Collection<E>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        // The backing map; every view operation delegates to it.
        final ConcurrentHashMapV8<K,V> map;
        CollectionView(ConcurrentHashMapV8<K,V> map)  { this.map = map; }

        /**
         * Returns the map backing this view.
         *
         * @return the map backing this view
         */
        public ConcurrentHashMapV8<K,V> getMap() { return map; }

        /**
         * Removes all of the elements from this view, by removing all
         * the mappings from the map backing this view.
         */
        public final void clear() { map.clear(); }
        public final int size() { return map.size(); }
        public final boolean isEmpty() { return map.isEmpty(); }

        // implementations below rely on concrete classes supplying these
        // abstract methods
        /**
         * Returns a "weakly consistent" iterator that will never
         * throw {@link ConcurrentModificationException}, and
         * guarantees to traverse elements as they existed upon
         * construction of the iterator, and may (but is not
         * guaranteed to) reflect any modifications subsequent to
         * construction.
         */
        public abstract Iterator<E> iterator();
        public abstract boolean contains(Object o);
        public abstract boolean remove(Object o);

        private static final String oomeMsg = "Required array size too large";

        /**
         * Snapshots the elements of this view into a new Object array.
         * Because the map may grow or shrink concurrently while being
         * iterated, the array is grown on demand and trimmed at the end.
         */
        public final Object[] toArray() {
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(oomeMsg);
            int n = (int)sz;
            Object[] r = new Object[n];
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    // Map grew during traversal: expand by ~50% + 1,
                    // saturating at MAX_ARRAY_SIZE (the guard on the
                    // second test avoids int overflow in the addition).
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = e;
            }
            // Trim excess capacity if fewer elements were seen than sized for.
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        /**
         * Snapshots the elements into {@code a} if it is large enough,
         * otherwise into a freshly allocated array of the same component
         * type. Uses the same on-demand growth policy as {@link #toArray()}.
         */
        @SuppressWarnings("unchecked")
        public final <T> T[] toArray(T[] a) {
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(oomeMsg);
            int m = (int)sz;
            T[] r = (a.length >= m) ? a :
                (T[])java.lang.reflect.Array
                .newInstance(a.getClass().getComponentType(), m);
            int n = r.length;
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    // Same overflow-safe ~50% growth as in toArray().
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = (T)e;
            }
            if (a == r && i < n) {
                r[i] = null; // null-terminate
                return r;
            }
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        /**
         * Returns a string representation of this collection.
         * The string representation consists of the string representations
         * of the collection's elements in the order they are returned by
         * its iterator, enclosed in square brackets ({@code "[]"}).
         * Adjacent elements are separated by the characters {@code ", "}
         * (comma and space). Elements are converted to strings as by
         * {@link String#valueOf(Object)}.
         *
         * @return a string representation of this collection
         */
        public final String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append('[');
            Iterator<E> it = iterator();
            if (it.hasNext()) {
                for (;;) {
                    Object e = it.next();
                    // Guard against self-reference, as in AbstractCollection.
                    sb.append(e == this ? "(this Collection)" : e);
                    if (!it.hasNext())
                        break;
                    sb.append(',').append(' ');
                }
            }
            return sb.append(']').toString();
        }

        public final boolean containsAll(Collection<?> c) {
            if (c != this) {
                for (Object e : c) {
                    // Views never contain null, so a null element fails fast.
                    if (e == null || !contains(e))
                        return false;
                }
            }
            return true;
        }

        public final boolean removeAll(Collection<?> c) {
            boolean modified = false;
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

        public final boolean retainAll(Collection<?> c) {
            boolean modified = false;
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (!c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

    }
4400
4401 /**
4402 * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in
4403 * which additions may optionally be enabled by mapping to a
4404 * common value. This class cannot be directly instantiated.
4405 * See {@link #keySet() keySet()},
4406 * {@link #keySet(Object) keySet(V)},
4407 * {@link #newKeySet() newKeySet()},
4408 * {@link #newKeySet(int) newKeySet(int)}.
4409 *
4410 * @since 1.8
4411 */
4412 public static class KeySetView<K,V> extends CollectionView<K,V,K>
4413 implements Set<K>, java.io.Serializable {
4414 private static final long serialVersionUID = 7249069246763182397L;
4415 private final V value;
4416 KeySetView(ConcurrentHashMapV8<K,V> map, V value) { // non-public
4417 super(map);
4418 this.value = value;
4419 }
4420
4421 /**
4422 * Returns the default mapped value for additions,
4423 * or {@code null} if additions are not supported.
4424 *
4425 * @return the default mapped value for additions, or {@code null}
4426 * if not supported
4427 */
4428 public V getMappedValue() { return value; }
4429
4430 /**
4431 * {@inheritDoc}
4432 * @throws NullPointerException if the specified key is null
4433 */
4434 public boolean contains(Object o) { return map.containsKey(o); }
4435
4436 /**
4437 * Removes the key from this map view, by removing the key (and its
4438 * corresponding value) from the backing map. This method does
4439 * nothing if the key is not in the map.
4440 *
4441 * @param o the key to be removed from the backing map
4442 * @return {@code true} if the backing map contained the specified key
4443 * @throws NullPointerException if the specified key is null
4444 */
4445 public boolean remove(Object o) { return map.remove(o) != null; }
4446
4447 /**
4448 * @return an iterator over the keys of the backing map
4449 */
4450 public Iterator<K> iterator() {
4451 Node<K,V>[] t;
4452 ConcurrentHashMapV8<K,V> m = map;
4453 int f = (t = m.table) == null ? 0 : t.length;
4454 return new KeyIterator<K,V>(t, f, 0, f, m);
4455 }
4456
4457 /**
4458 * Adds the specified key to this set view by mapping the key to
4459 * the default mapped value in the backing map, if defined.
4460 *
4461 * @param e key to be added
4462 * @return {@code true} if this set changed as a result of the call
4463 * @throws NullPointerException if the specified key is null
4464 * @throws UnsupportedOperationException if no default mapped value
4465 * for additions was provided
4466 */
4467 public boolean add(K e) {
4468 V v;
4469 if ((v = value) == null)
4470 throw new UnsupportedOperationException();
4471 return map.putVal(e, v, true) == null;
4472 }
4473
4474 /**
4475 * Adds all of the elements in the specified collection to this set,
4476 * as if by calling {@link #add} on each one.
4477 *
4478 * @param c the elements to be inserted into this set
4479 * @return {@code true} if this set changed as a result of the call
4480 * @throws NullPointerException if the collection or any of its
4481 * elements are {@code null}
4482 * @throws UnsupportedOperationException if no default mapped value
4483 * for additions was provided
4484 */
4485 public boolean addAll(Collection<? extends K> c) {
4486 boolean added = false;
4487 V v;
4488 if ((v = value) == null)
4489 throw new UnsupportedOperationException();
4490 for (K e : c) {
4491 if (map.putVal(e, v, true) == null)
4492 added = true;
4493 }
4494 return added;
4495 }
4496
4497 public int hashCode() {
4498 int h = 0;
4499 for (K e : this)
4500 h += e.hashCode();
4501 return h;
4502 }
4503
4504 public boolean equals(Object o) {
4505 Set<?> c;
4506 return ((o instanceof Set) &&
4507 ((c = (Set<?>)o) == this ||
4508 (containsAll(c) && c.containsAll(this))));
4509 }
4510
4511 public ConcurrentHashMapSpliterator<K> spliterator() {
4512 Node<K,V>[] t;
4513 ConcurrentHashMapV8<K,V> m = map;
4514 long n = m.sumCount();
4515 int f = (t = m.table) == null ? 0 : t.length;
4516 return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4517 }
4518
4519 public void forEach(Action<? super K> action) {
4520 if (action == null) throw new NullPointerException();
4521 Node<K,V>[] t;
4522 if ((t = map.table) != null) {
4523 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4524 for (Node<K,V> p; (p = it.advance()) != null; )
4525 action.apply(p.key);
4526 }
4527 }
4528 }
4529
4530 /**
4531 * A view of a ConcurrentHashMapV8 as a {@link Collection} of
4532 * values, in which additions are disabled. This class cannot be
4533 * directly instantiated. See {@link #values()}.
4534 */
4535 static final class ValuesView<K,V> extends CollectionView<K,V,V>
4536 implements Collection<V>, java.io.Serializable {
4537 private static final long serialVersionUID = 2249069246763182397L;
4538 ValuesView(ConcurrentHashMapV8<K,V> map) { super(map); }
4539 public final boolean contains(Object o) {
4540 return map.containsValue(o);
4541 }
4542
4543 public final boolean remove(Object o) {
4544 if (o != null) {
4545 for (Iterator<V> it = iterator(); it.hasNext();) {
4546 if (o.equals(it.next())) {
4547 it.remove();
4548 return true;
4549 }
4550 }
4551 }
4552 return false;
4553 }
4554
4555 public final Iterator<V> iterator() {
4556 ConcurrentHashMapV8<K,V> m = map;
4557 Node<K,V>[] t;
4558 int f = (t = m.table) == null ? 0 : t.length;
4559 return new ValueIterator<K,V>(t, f, 0, f, m);
4560 }
4561
4562 public final boolean add(V e) {
4563 throw new UnsupportedOperationException();
4564 }
4565 public final boolean addAll(Collection<? extends V> c) {
4566 throw new UnsupportedOperationException();
4567 }
4568
4569 public ConcurrentHashMapSpliterator<V> spliterator() {
4570 Node<K,V>[] t;
4571 ConcurrentHashMapV8<K,V> m = map;
4572 long n = m.sumCount();
4573 int f = (t = m.table) == null ? 0 : t.length;
4574 return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4575 }
4576
4577 public void forEach(Action<? super V> action) {
4578 if (action == null) throw new NullPointerException();
4579 Node<K,V>[] t;
4580 if ((t = map.table) != null) {
4581 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4582 for (Node<K,V> p; (p = it.advance()) != null; )
4583 action.apply(p.val);
4584 }
4585 }
4586 }
4587
4588 /**
4589 * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value)
4590 * entries. This class cannot be directly instantiated. See
4591 * {@link #entrySet()}.
4592 */
4593 static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
4594 implements Set<Map.Entry<K,V>>, java.io.Serializable {
4595 private static final long serialVersionUID = 2249069246763182397L;
4596 EntrySetView(ConcurrentHashMapV8<K,V> map) { super(map); }
4597
     // True only for a Map.Entry whose key maps (in the backing map) to a
     // value equal to the entry's value; null key/value never match.
4598 public boolean contains(Object o) {
4599 Object k, v, r; Map.Entry<?,?> e;
4600 return ((o instanceof Map.Entry) &&
4601 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4602 (r = map.get(k)) != null &&
4603 (v = e.getValue()) != null &&
4604 (v == r || v.equals(r)));
4605 }
4606
     // Delegates to the map's conditional remove(key, value).
4607 public boolean remove(Object o) {
4608 Object k, v; Map.Entry<?,?> e;
4609 return ((o instanceof Map.Entry) &&
4610 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4611 (v = e.getValue()) != null &&
4612 map.remove(k, v));
4613 }
4614
4615 /**
4616 * @return an iterator over the entries of the backing map
4617 */
4618 public Iterator<Map.Entry<K,V>> iterator() {
4619 ConcurrentHashMapV8<K,V> m = map;
4620 Node<K,V>[] t;
4621 int f = (t = m.table) == null ? 0 : t.length;
4622 return new EntryIterator<K,V>(t, f, 0, f, m);
4623 }
4624
     // Unlike ValuesView, entry addition is supported: it inserts the
     // mapping into the backing map (non-replacing putVal).
4625 public boolean add(Entry<K,V> e) {
4626 return map.putVal(e.getKey(), e.getValue(), false) == null;
4627 }
4628
4629 public boolean addAll(Collection<? extends Entry<K,V>> c) {
4630 boolean added = false;
4631 for (Entry<K,V> e : c) {
4632 if (add(e))
4633 added = true;
4634 }
4635 return added;
4636 }
4637
     // Sum of entry hash codes, per the Set.hashCode contract.
4638 public final int hashCode() {
4639 int h = 0;
4640 Node<K,V>[] t;
4641 if ((t = map.table) != null) {
4642 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4643 for (Node<K,V> p; (p = it.advance()) != null; ) {
4644 h += p.hashCode();
4645 }
4646 }
4647 return h;
4648 }
4649
4650 public final boolean equals(Object o) {
4651 Set<?> c;
4652 return ((o instanceof Set) &&
4653 ((c = (Set<?>)o) == this ||
4654 (containsAll(c) && c.containsAll(this))));
4655 }
4656
4657 public ConcurrentHashMapSpliterator<Map.Entry<K,V>> spliterator() {
4658 Node<K,V>[] t;
4659 ConcurrentHashMapV8<K,V> m = map;
     // sumCount() may transiently be negative; clamp the size estimate to 0.
4660 long n = m.sumCount();
4661 int f = (t = m.table) == null ? 0 : t.length;
4662 return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
4663 }
4664
4665 public void forEach(Action<? super Map.Entry<K,V>> action) {
4666 if (action == null) throw new NullPointerException();
4667 Node<K,V>[] t;
4668 if ((t = map.table) != null) {
4669 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
     // Wrap each node as a MapEntry so setValue writes through to the map.
4670 for (Node<K,V> p; (p = it.advance()) != null; )
4671 action.apply(new MapEntry<K,V>(p.key, p.val, map));
4672 }
4673 }
4674
4675 }
4676
4677 // -------------------------------------------------------
4678
4679 /**
4680 * Base class for bulk tasks. Repeats some fields and code from
4681 * class Traverser, because we need to subclass CountedCompleter.
4682 */
4683 abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
4684 Node<K,V>[] tab; // same as Traverser
4685 Node<K,V> next;
4686 int index;
4687 int baseIndex;
4688 int baseLimit;
4689 final int baseSize;
4690 int batch; // split control
4691
     // Root tasks (par == null) cover the whole table [0, t.length);
     // subtasks inherit baseSize from the parent and scan [i, f).
4692 BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
4693 super(par);
4694 this.batch = b;
4695 this.index = this.baseIndex = i;
4696 if ((this.tab = t) == null)
4697 this.baseSize = this.baseLimit = 0;
4698 else if (par == null)
4699 this.baseSize = this.baseLimit = t.length;
4700 else {
4701 this.baseLimit = f;
4702 this.baseSize = par.baseSize;
4703 }
4704 }
4705
4706 /**
4707 * Same as Traverser version
4708 */
     // Lock-free traversal step: returns the next live node, or null when
     // this task's slot range is exhausted. Special (hash < 0) bins are
     // unwrapped: a ForwardingNode redirects traversal into nextTable
     // (table being resized); a TreeBin is walked via its 'first' chain.
4709 final Node<K,V> advance() {
4710 Node<K,V> e;
4711 if ((e = next) != null)
4712 e = e.next;
4713 for (;;) {
4714 Node<K,V>[] t; int i, n; K ek; // must use locals in checks
4715 if (e != null)
4716 return next = e;
4717 if (baseIndex >= baseLimit || (t = tab) == null ||
4718 (n = t.length) <= (i = index) || i < 0)
4719 return next = null;
4720 if ((e = tabAt(t, index)) != null && e.hash < 0) {
4721 if (e instanceof ForwardingNode) {
4722 tab = ((ForwardingNode<K,V>)e).nextTable;
4723 e = null;
4724 continue;
4725 }
4726 else if (e instanceof TreeBin)
4727 e = ((TreeBin<K,V>)e).first;
4728 else
4729 e = null;
4730 }
4731 if ((index += baseSize) >= n)
4732 index = ++baseIndex; // visit upper slots if present
4733 }
4734 }
4735 }
4736
4737 /*
4738 * Task classes. Coded in a regular but ugly format/style to
4739 * simplify checks that each variant differs in the right way from
4740 * others. The null screenings exist because compilers cannot tell
4741 * that we've already null-checked task arguments, so we force
4742 * simplest hoisted bypass to help avoid convoluted traps.
4743 */
     // Parallel forEach over keys. The split loop halves the remaining
     // range: it forks a subtask for the upper half [h, f) (note the
     // deliberate side effects batch >>>= 1 and baseLimit = h in the call),
     // then this task processes its retained lower half sequentially.
4744 @SuppressWarnings("serial")
4745 static final class ForEachKeyTask<K,V>
4746 extends BulkTask<K,V,Void> {
4747 final Action<? super K> action;
4748 ForEachKeyTask
4749 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4750 Action<? super K> action) {
4751 super(p, b, i, f, t);
4752 this.action = action;
4753 }
4754 public final void compute() {
4755 final Action<? super K> action;
4756 if ((action = this.action) != null) {
     // midpoint h computed with >>> 1 to avoid int overflow of (f + i)
4757 for (int i = baseIndex, f, h; batch > 0 &&
4758 (h = ((f = baseLimit) + i) >>> 1) > i;) {
4759 addToPendingCount(1);
4760 new ForEachKeyTask<K,V>
4761 (this, batch >>>= 1, baseLimit = h, f, tab,
4762 action).fork();
4763 }
4764 for (Node<K,V> p; (p = advance()) != null;)
4765 action.apply(p.key);
4766 propagateCompletion();
4767 }
4768 }
4769 }
4770
     // Parallel forEach over values; same split/fork structure as
     // ForEachKeyTask (fork upper half, keep lower half), applying the
     // action to p.val instead of p.key.
4771 @SuppressWarnings("serial")
4772 static final class ForEachValueTask<K,V>
4773 extends BulkTask<K,V,Void> {
4774 final Action<? super V> action;
4775 ForEachValueTask
4776 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4777 Action<? super V> action) {
4778 super(p, b, i, f, t);
4779 this.action = action;
4780 }
4781 public final void compute() {
4782 final Action<? super V> action;
4783 if ((action = this.action) != null) {
     // split loop: side effects batch >>>= 1 and baseLimit = h are intended
4784 for (int i = baseIndex, f, h; batch > 0 &&
4785 (h = ((f = baseLimit) + i) >>> 1) > i;) {
4786 addToPendingCount(1);
4787 new ForEachValueTask<K,V>
4788 (this, batch >>>= 1, baseLimit = h, f, tab,
4789 action).fork();
4790 }
4791 for (Node<K,V> p; (p = advance()) != null;)
4792 action.apply(p.val);
4793 propagateCompletion();
4794 }
4795 }
4796 }
4797
     // Parallel forEach over entries; applies the action to each Node
     // directly (Node implements Map.Entry). Same split/fork structure as
     // the other ForEach tasks.
4798 @SuppressWarnings("serial")
4799 static final class ForEachEntryTask<K,V>
4800 extends BulkTask<K,V,Void> {
4801 final Action<? super Entry<K,V>> action;
4802 ForEachEntryTask
4803 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4804 Action<? super Entry<K,V>> action) {
4805 super(p, b, i, f, t);
4806 this.action = action;
4807 }
4808 public final void compute() {
4809 final Action<? super Entry<K,V>> action;
4810 if ((action = this.action) != null) {
     // split loop: side effects batch >>>= 1 and baseLimit = h are intended
4811 for (int i = baseIndex, f, h; batch > 0 &&
4812 (h = ((f = baseLimit) + i) >>> 1) > i;) {
4813 addToPendingCount(1);
4814 new ForEachEntryTask<K,V>
4815 (this, batch >>>= 1, baseLimit = h, f, tab,
4816 action).fork();
4817 }
4818 for (Node<K,V> p; (p = advance()) != null; )
4819 action.apply(p);
4820 propagateCompletion();
4821 }
4822 }
4823 }
4824
     // Parallel forEach over (key, value) pairs; applies the BiAction to
     // (p.key, p.val). Same split/fork structure as the other ForEach tasks.
4825 @SuppressWarnings("serial")
4826 static final class ForEachMappingTask<K,V>
4827 extends BulkTask<K,V,Void> {
4828 final BiAction<? super K, ? super V> action;
4829 ForEachMappingTask
4830 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4831 BiAction<? super K,? super V> action) {
4832 super(p, b, i, f, t);
4833 this.action = action;
4834 }
4835 public final void compute() {
4836 final BiAction<? super K, ? super V> action;
4837 if ((action = this.action) != null) {
     // split loop: side effects batch >>>= 1 and baseLimit = h are intended
4838 for (int i = baseIndex, f, h; batch > 0 &&
4839 (h = ((f = baseLimit) + i) >>> 1) > i;) {
4840 addToPendingCount(1);
4841 new ForEachMappingTask<K,V>
4842 (this, batch >>>= 1, baseLimit = h, f, tab,
4843 action).fork();
4844 }
4845 for (Node<K,V> p; (p = advance()) != null; )
4846 action.apply(p.key, p.val);
4847 propagateCompletion();
4848 }
4849 }
4850 }
4851
     // Parallel forEach over transformed keys: applies the action only to
     // non-null results of transformer.apply(key) (null means "skip").
4852 @SuppressWarnings("serial")
4853 static final class ForEachTransformedKeyTask<K,V,U>
4854 extends BulkTask<K,V,Void> {
4855 final Fun<? super K, ? extends U> transformer;
4856 final Action<? super U> action;
4857 ForEachTransformedKeyTask
4858 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4859 Fun<? super K, ? extends U> transformer, Action<? super U> action) {
4860 super(p, b, i, f, t);
4861 this.transformer = transformer; this.action = action;
4862 }
4863 public final void compute() {
4864 final Fun<? super K, ? extends U> transformer;
4865 final Action<? super U> action;
4866 if ((transformer = this.transformer) != null &&
4867 (action = this.action) != null) {
     // split loop: side effects batch >>>= 1 and baseLimit = h are intended
4868 for (int i = baseIndex, f, h; batch > 0 &&
4869 (h = ((f = baseLimit) + i) >>> 1) > i;) {
4870 addToPendingCount(1);
4871 new ForEachTransformedKeyTask<K,V,U>
4872 (this, batch >>>= 1, baseLimit = h, f, tab,
4873 transformer, action).fork();
4874 }
4875 for (Node<K,V> p; (p = advance()) != null; ) {
4876 U u;
4877 if ((u = transformer.apply(p.key)) != null)
4878 action.apply(u);
4879 }
4880 propagateCompletion();
4881 }
4882 }
4883 }
4884
     // Parallel forEach over transformed values: applies the action only to
     // non-null results of transformer.apply(val) (null means "skip").
4885 @SuppressWarnings("serial")
4886 static final class ForEachTransformedValueTask<K,V,U>
4887 extends BulkTask<K,V,Void> {
4888 final Fun<? super V, ? extends U> transformer;
4889 final Action<? super U> action;
4890 ForEachTransformedValueTask
4891 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4892 Fun<? super V, ? extends U> transformer, Action<? super U> action) {
4893 super(p, b, i, f, t);
4894 this.transformer = transformer; this.action = action;
4895 }
4896 public final void compute() {
4897 final Fun<? super V, ? extends U> transformer;
4898 final Action<? super U> action;
4899 if ((transformer = this.transformer) != null &&
4900 (action = this.action) != null) {
     // split loop: side effects batch >>>= 1 and baseLimit = h are intended
4901 for (int i = baseIndex, f, h; batch > 0 &&
4902 (h = ((f = baseLimit) + i) >>> 1) > i;) {
4903 addToPendingCount(1);
4904 new ForEachTransformedValueTask<K,V,U>
4905 (this, batch >>>= 1, baseLimit = h, f, tab,
4906 transformer, action).fork();
4907 }
4908 for (Node<K,V> p; (p = advance()) != null; ) {
4909 U u;
4910 if ((u = transformer.apply(p.val)) != null)
4911 action.apply(u);
4912 }
4913 propagateCompletion();
4914 }
4915 }
4916 }
4917
     // Parallel forEach over transformed entries: applies the action only to
     // non-null results of transformer.apply(entry) (null means "skip").
4918 @SuppressWarnings("serial")
4919 static final class ForEachTransformedEntryTask<K,V,U>
4920 extends BulkTask<K,V,Void> {
4921 final Fun<Map.Entry<K,V>, ? extends U> transformer;
4922 final Action<? super U> action;
4923 ForEachTransformedEntryTask
4924 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4925 Fun<Map.Entry<K,V>, ? extends U> transformer, Action<? super U> action) {
4926 super(p, b, i, f, t);
4927 this.transformer = transformer; this.action = action;
4928 }
4929 public final void compute() {
4930 final Fun<Map.Entry<K,V>, ? extends U> transformer;
4931 final Action<? super U> action;
4932 if ((transformer = this.transformer) != null &&
4933 (action = this.action) != null) {
     // split loop: side effects batch >>>= 1 and baseLimit = h are intended
4934 for (int i = baseIndex, f, h; batch > 0 &&
4935 (h = ((f = baseLimit) + i) >>> 1) > i;) {
4936 addToPendingCount(1);
4937 new ForEachTransformedEntryTask<K,V,U>
4938 (this, batch >>>= 1, baseLimit = h, f, tab,
4939 transformer, action).fork();
4940 }
4941 for (Node<K,V> p; (p = advance()) != null; ) {
4942 U u;
4943 if ((u = transformer.apply(p)) != null)
4944 action.apply(u);
4945 }
4946 propagateCompletion();
4947 }
4948 }
4949 }
4950
     // Parallel forEach over transformed (key, value) pairs: applies the
     // action only to non-null results of transformer.apply(key, val).
4951 @SuppressWarnings("serial")
4952 static final class ForEachTransformedMappingTask<K,V,U>
4953 extends BulkTask<K,V,Void> {
4954 final BiFun<? super K, ? super V, ? extends U> transformer;
4955 final Action<? super U> action;
4956 ForEachTransformedMappingTask
4957 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4958 BiFun<? super K, ? super V, ? extends U> transformer,
4959 Action<? super U> action) {
4960 super(p, b, i, f, t);
4961 this.transformer = transformer; this.action = action;
4962 }
4963 public final void compute() {
4964 final BiFun<? super K, ? super V, ? extends U> transformer;
4965 final Action<? super U> action;
4966 if ((transformer = this.transformer) != null &&
4967 (action = this.action) != null) {
     // split loop: side effects batch >>>= 1 and baseLimit = h are intended
4968 for (int i = baseIndex, f, h; batch > 0 &&
4969 (h = ((f = baseLimit) + i) >>> 1) > i;) {
4970 addToPendingCount(1);
4971 new ForEachTransformedMappingTask<K,V,U>
4972 (this, batch >>>= 1, baseLimit = h, f, tab,
4973 transformer, action).fork();
4974 }
4975 for (Node<K,V> p; (p = advance()) != null; ) {
4976 U u;
4977 if ((u = transformer.apply(p.key, p.val)) != null)
4978 action.apply(u);
4979 }
4980 propagateCompletion();
4981 }
4982 }
4983 }
4984
     // Parallel search over keys: the first non-null result of
     // searchFunction wins via CAS on the shared AtomicReference, which
     // then quietly completes the root so sibling tasks stop early.
4985 @SuppressWarnings("serial")
4986 static final class SearchKeysTask<K,V,U>
4987 extends BulkTask<K,V,U> {
4988 final Fun<? super K, ? extends U> searchFunction;
4989 final AtomicReference<U> result;
4990 SearchKeysTask
4991 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4992 Fun<? super K, ? extends U> searchFunction,
4993 AtomicReference<U> result) {
4994 super(p, b, i, f, t);
4995 this.searchFunction = searchFunction; this.result = result;
4996 }
4997 public final U getRawResult() { return result.get(); }
4998 public final void compute() {
4999 final Fun<? super K, ? extends U> searchFunction;
5000 final AtomicReference<U> result;
5001 if ((searchFunction = this.searchFunction) != null &&
5002 (result = this.result) != null) {
5003 for (int i = baseIndex, f, h; batch > 0 &&
5004 (h = ((f = baseLimit) + i) >>> 1) > i;) {
     // another task already found a match; stop splitting and scanning
5005 if (result.get() != null)
5006 return;
5007 addToPendingCount(1);
5008 new SearchKeysTask<K,V,U>
5009 (this, batch >>>= 1, baseLimit = h, f, tab,
5010 searchFunction, result).fork();
5011 }
5012 while (result.get() == null) {
5013 U u;
5014 Node<K,V> p;
5015 if ((p = advance()) == null) {
5016 propagateCompletion();
5017 break;
5018 }
5019 if ((u = searchFunction.apply(p.key)) != null) {
5020 if (result.compareAndSet(null, u))
5021 quietlyCompleteRoot();
5022 break;
5023 }
5024 }
5025 }
5026 }
5027 }
5028
     // Parallel search over values; same early-termination protocol as
     // SearchKeysTask (CAS the first non-null result, complete the root).
5029 @SuppressWarnings("serial")
5030 static final class SearchValuesTask<K,V,U>
5031 extends BulkTask<K,V,U> {
5032 final Fun<? super V, ? extends U> searchFunction;
5033 final AtomicReference<U> result;
5034 SearchValuesTask
5035 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5036 Fun<? super V, ? extends U> searchFunction,
5037 AtomicReference<U> result) {
5038 super(p, b, i, f, t);
5039 this.searchFunction = searchFunction; this.result = result;
5040 }
5041 public final U getRawResult() { return result.get(); }
5042 public final void compute() {
5043 final Fun<? super V, ? extends U> searchFunction;
5044 final AtomicReference<U> result;
5045 if ((searchFunction = this.searchFunction) != null &&
5046 (result = this.result) != null) {
5047 for (int i = baseIndex, f, h; batch > 0 &&
5048 (h = ((f = baseLimit) + i) >>> 1) > i;) {
     // another task already found a match; stop splitting and scanning
5049 if (result.get() != null)
5050 return;
5051 addToPendingCount(1);
5052 new SearchValuesTask<K,V,U>
5053 (this, batch >>>= 1, baseLimit = h, f, tab,
5054 searchFunction, result).fork();
5055 }
5056 while (result.get() == null) {
5057 U u;
5058 Node<K,V> p;
5059 if ((p = advance()) == null) {
5060 propagateCompletion();
5061 break;
5062 }
5063 if ((u = searchFunction.apply(p.val)) != null) {
5064 if (result.compareAndSet(null, u))
5065 quietlyCompleteRoot();
5066 break;
5067 }
5068 }
5069 }
5070 }
5071 }
5072
     // Parallel search over entries; same early-termination protocol as
     // SearchKeysTask. (The 'return' after quietlyCompleteRoot, where the
     // sibling tasks use 'break', is behaviorally equivalent: result is
     // already non-null so the while loop would exit anyway.)
5073 @SuppressWarnings("serial")
5074 static final class SearchEntriesTask<K,V,U>
5075 extends BulkTask<K,V,U> {
5076 final Fun<Entry<K,V>, ? extends U> searchFunction;
5077 final AtomicReference<U> result;
5078 SearchEntriesTask
5079 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5080 Fun<Entry<K,V>, ? extends U> searchFunction,
5081 AtomicReference<U> result) {
5082 super(p, b, i, f, t);
5083 this.searchFunction = searchFunction; this.result = result;
5084 }
5085 public final U getRawResult() { return result.get(); }
5086 public final void compute() {
5087 final Fun<Entry<K,V>, ? extends U> searchFunction;
5088 final AtomicReference<U> result;
5089 if ((searchFunction = this.searchFunction) != null &&
5090 (result = this.result) != null) {
5091 for (int i = baseIndex, f, h; batch > 0 &&
5092 (h = ((f = baseLimit) + i) >>> 1) > i;) {
     // another task already found a match; stop splitting and scanning
5093 if (result.get() != null)
5094 return;
5095 addToPendingCount(1);
5096 new SearchEntriesTask<K,V,U>
5097 (this, batch >>>= 1, baseLimit = h, f, tab,
5098 searchFunction, result).fork();
5099 }
5100 while (result.get() == null) {
5101 U u;
5102 Node<K,V> p;
5103 if ((p = advance()) == null) {
5104 propagateCompletion();
5105 break;
5106 }
5107 if ((u = searchFunction.apply(p)) != null) {
5108 if (result.compareAndSet(null, u))
5109 quietlyCompleteRoot();
5110 return;
5111 }
5112 }
5113 }
5114 }
5115 }
5116
     // Parallel search over (key, value) pairs; same early-termination
     // protocol as SearchKeysTask.
5117 @SuppressWarnings("serial")
5118 static final class SearchMappingsTask<K,V,U>
5119 extends BulkTask<K,V,U> {
5120 final BiFun<? super K, ? super V, ? extends U> searchFunction;
5121 final AtomicReference<U> result;
5122 SearchMappingsTask
5123 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5124 BiFun<? super K, ? super V, ? extends U> searchFunction,
5125 AtomicReference<U> result) {
5126 super(p, b, i, f, t);
5127 this.searchFunction = searchFunction; this.result = result;
5128 }
5129 public final U getRawResult() { return result.get(); }
5130 public final void compute() {
5131 final BiFun<? super K, ? super V, ? extends U> searchFunction;
5132 final AtomicReference<U> result;
5133 if ((searchFunction = this.searchFunction) != null &&
5134 (result = this.result) != null) {
5135 for (int i = baseIndex, f, h; batch > 0 &&
5136 (h = ((f = baseLimit) + i) >>> 1) > i;) {
     // another task already found a match; stop splitting and scanning
5137 if (result.get() != null)
5138 return;
5139 addToPendingCount(1);
5140 new SearchMappingsTask<K,V,U>
5141 (this, batch >>>= 1, baseLimit = h, f, tab,
5142 searchFunction, result).fork();
5143 }
5144 while (result.get() == null) {
5145 U u;
5146 Node<K,V> p;
5147 if ((p = advance()) == null) {
5148 propagateCompletion();
5149 break;
5150 }
5151 if ((u = searchFunction.apply(p.key, p.val)) != null) {
5152 if (result.compareAndSet(null, u))
5153 quietlyCompleteRoot();
5154 break;
5155 }
5156 }
5157 }
5158 }
5159 }
5160
     // Parallel reduction over keys. Forked subtasks are linked through the
     // 'rights' chain; after the local scan, the completion loop
     // (firstComplete/nextComplete) folds each completed right-hand
     // subtask's result into its parent's result.
5161 @SuppressWarnings("serial")
5162 static final class ReduceKeysTask<K,V>
5163 extends BulkTask<K,V,K> {
5164 final BiFun<? super K, ? super K, ? extends K> reducer;
5165 K result;
5166 ReduceKeysTask<K,V> rights, nextRight;
5167 ReduceKeysTask
5168 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5169 ReduceKeysTask<K,V> nextRight,
5170 BiFun<? super K, ? super K, ? extends K> reducer) {
5171 super(p, b, i, f, t); this.nextRight = nextRight;
5172 this.reducer = reducer;
5173 }
5174 public final K getRawResult() { return result; }
5175 public final void compute() {
5176 final BiFun<? super K, ? super K, ? extends K> reducer;
5177 if ((reducer = this.reducer) != null) {
     // split loop: side effects batch >>>= 1 and baseLimit = h are intended
5178 for (int i = baseIndex, f, h; batch > 0 &&
5179 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5180 addToPendingCount(1);
5181 (rights = new ReduceKeysTask<K,V>
5182 (this, batch >>>= 1, baseLimit = h, f, tab,
5183 rights, reducer)).fork();
5184 }
5185 K r = null;
5186 for (Node<K,V> p; (p = advance()) != null; ) {
5187 K u = p.key;
5188 r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
5189 }
5190 result = r;
5191 CountedCompleter<?> c;
     // merge results of completed subtasks up the completion chain
5192 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5193 @SuppressWarnings("unchecked") ReduceKeysTask<K,V>
5194 t = (ReduceKeysTask<K,V>)c,
5195 s = t.rights;
5196 while (s != null) {
5197 K tr, sr;
5198 if ((sr = s.result) != null)
5199 t.result = (((tr = t.result) == null) ? sr :
5200 reducer.apply(tr, sr));
5201 s = t.rights = s.nextRight;
5202 }
5203 }
5204 }
5205 }
5206 }
5207
     // Parallel reduction over values; same rights-chain / completion-loop
     // merge structure as ReduceKeysTask.
5208 @SuppressWarnings("serial")
5209 static final class ReduceValuesTask<K,V>
5210 extends BulkTask<K,V,V> {
5211 final BiFun<? super V, ? super V, ? extends V> reducer;
5212 V result;
5213 ReduceValuesTask<K,V> rights, nextRight;
5214 ReduceValuesTask
5215 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5216 ReduceValuesTask<K,V> nextRight,
5217 BiFun<? super V, ? super V, ? extends V> reducer) {
5218 super(p, b, i, f, t); this.nextRight = nextRight;
5219 this.reducer = reducer;
5220 }
5221 public final V getRawResult() { return result; }
5222 public final void compute() {
5223 final BiFun<? super V, ? super V, ? extends V> reducer;
5224 if ((reducer = this.reducer) != null) {
     // split loop: side effects batch >>>= 1 and baseLimit = h are intended
5225 for (int i = baseIndex, f, h; batch > 0 &&
5226 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5227 addToPendingCount(1);
5228 (rights = new ReduceValuesTask<K,V>
5229 (this, batch >>>= 1, baseLimit = h, f, tab,
5230 rights, reducer)).fork();
5231 }
5232 V r = null;
5233 for (Node<K,V> p; (p = advance()) != null; ) {
5234 V v = p.val;
5235 r = (r == null) ? v : reducer.apply(r, v);
5236 }
5237 result = r;
5238 CountedCompleter<?> c;
     // merge results of completed subtasks up the completion chain
5239 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5240 @SuppressWarnings("unchecked") ReduceValuesTask<K,V>
5241 t = (ReduceValuesTask<K,V>)c,
5242 s = t.rights;
5243 while (s != null) {
5244 V tr, sr;
5245 if ((sr = s.result) != null)
5246 t.result = (((tr = t.result) == null) ? sr :
5247 reducer.apply(tr, sr));
5248 s = t.rights = s.nextRight;
5249 }
5250 }
5251 }
5252 }
5253 }
5254
     // Parallel reduction over entries; same rights-chain / completion-loop
     // merge structure as ReduceKeysTask, reducing the Nodes themselves
     // (Node implements Map.Entry).
5255 @SuppressWarnings("serial")
5256 static final class ReduceEntriesTask<K,V>
5257 extends BulkTask<K,V,Map.Entry<K,V>> {
5258 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5259 Map.Entry<K,V> result;
5260 ReduceEntriesTask<K,V> rights, nextRight;
5261 ReduceEntriesTask
5262 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5263 ReduceEntriesTask<K,V> nextRight,
5264 BiFun<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5265 super(p, b, i, f, t); this.nextRight = nextRight;
5266 this.reducer = reducer;
5267 }
5268 public final Map.Entry<K,V> getRawResult() { return result; }
5269 public final void compute() {
5270 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5271 if ((reducer = this.reducer) != null) {
     // split loop: side effects batch >>>= 1 and baseLimit = h are intended
5272 for (int i = baseIndex, f, h; batch > 0 &&
5273 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5274 addToPendingCount(1);
5275 (rights = new ReduceEntriesTask<K,V>
5276 (this, batch >>>= 1, baseLimit = h, f, tab,
5277 rights, reducer)).fork();
5278 }
5279 Map.Entry<K,V> r = null;
5280 for (Node<K,V> p; (p = advance()) != null; )
5281 r = (r == null) ? p : reducer.apply(r, p);
5282 result = r;
5283 CountedCompleter<?> c;
     // merge results of completed subtasks up the completion chain
5284 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5285 @SuppressWarnings("unchecked") ReduceEntriesTask<K,V>
5286 t = (ReduceEntriesTask<K,V>)c,
5287 s = t.rights;
5288 while (s != null) {
5289 Map.Entry<K,V> tr, sr;
5290 if ((sr = s.result) != null)
5291 t.result = (((tr = t.result) == null) ? sr :
5292 reducer.apply(tr, sr));
5293 s = t.rights = s.nextRight;
5294 }
5295 }
5296 }
5297 }
5298 }
5299
     // Parallel map-reduce over keys: transformer maps each key to a U
     // (null results are skipped), reducer folds the non-null results.
     // Rights-chain / completion-loop merge as in ReduceKeysTask.
5300 @SuppressWarnings("serial")
5301 static final class MapReduceKeysTask<K,V,U>
5302 extends BulkTask<K,V,U> {
5303 final Fun<? super K, ? extends U> transformer;
5304 final BiFun<? super U, ? super U, ? extends U> reducer;
5305 U result;
5306 MapReduceKeysTask<K,V,U> rights, nextRight;
5307 MapReduceKeysTask
5308 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5309 MapReduceKeysTask<K,V,U> nextRight,
5310 Fun<? super K, ? extends U> transformer,
5311 BiFun<? super U, ? super U, ? extends U> reducer) {
5312 super(p, b, i, f, t); this.nextRight = nextRight;
5313 this.transformer = transformer;
5314 this.reducer = reducer;
5315 }
5316 public final U getRawResult() { return result; }
5317 public final void compute() {
5318 final Fun<? super K, ? extends U> transformer;
5319 final BiFun<? super U, ? super U, ? extends U> reducer;
5320 if ((transformer = this.transformer) != null &&
5321 (reducer = this.reducer) != null) {
     // split loop: side effects batch >>>= 1 and baseLimit = h are intended
5322 for (int i = baseIndex, f, h; batch > 0 &&
5323 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5324 addToPendingCount(1);
5325 (rights = new MapReduceKeysTask<K,V,U>
5326 (this, batch >>>= 1, baseLimit = h, f, tab,
5327 rights, transformer, reducer)).fork();
5328 }
5329 U r = null;
5330 for (Node<K,V> p; (p = advance()) != null; ) {
5331 U u;
5332 if ((u = transformer.apply(p.key)) != null)
5333 r = (r == null) ? u : reducer.apply(r, u);
5334 }
5335 result = r;
5336 CountedCompleter<?> c;
     // merge results of completed subtasks up the completion chain
5337 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5338 @SuppressWarnings("unchecked") MapReduceKeysTask<K,V,U>
5339 t = (MapReduceKeysTask<K,V,U>)c,
5340 s = t.rights;
5341 while (s != null) {
5342 U tr, sr;
5343 if ((sr = s.result) != null)
5344 t.result = (((tr = t.result) == null) ? sr :
5345 reducer.apply(tr, sr));
5346 s = t.rights = s.nextRight;
5347 }
5348 }
5349 }
5350 }
5351 }
5352
     // Parallel map-reduce over values; same structure as MapReduceKeysTask
     // with transformer applied to p.val.
5353 @SuppressWarnings("serial")
5354 static final class MapReduceValuesTask<K,V,U>
5355 extends BulkTask<K,V,U> {
5356 final Fun<? super V, ? extends U> transformer;
5357 final BiFun<? super U, ? super U, ? extends U> reducer;
5358 U result;
5359 MapReduceValuesTask<K,V,U> rights, nextRight;
5360 MapReduceValuesTask
5361 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5362 MapReduceValuesTask<K,V,U> nextRight,
5363 Fun<? super V, ? extends U> transformer,
5364 BiFun<? super U, ? super U, ? extends U> reducer) {
5365 super(p, b, i, f, t); this.nextRight = nextRight;
5366 this.transformer = transformer;
5367 this.reducer = reducer;
5368 }
5369 public final U getRawResult() { return result; }
5370 public final void compute() {
5371 final Fun<? super V, ? extends U> transformer;
5372 final BiFun<? super U, ? super U, ? extends U> reducer;
5373 if ((transformer = this.transformer) != null &&
5374 (reducer = this.reducer) != null) {
     // split loop: side effects batch >>>= 1 and baseLimit = h are intended
5375 for (int i = baseIndex, f, h; batch > 0 &&
5376 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5377 addToPendingCount(1);
5378 (rights = new MapReduceValuesTask<K,V,U>
5379 (this, batch >>>= 1, baseLimit = h, f, tab,
5380 rights, transformer, reducer)).fork();
5381 }
5382 U r = null;
5383 for (Node<K,V> p; (p = advance()) != null; ) {
5384 U u;
5385 if ((u = transformer.apply(p.val)) != null)
5386 r = (r == null) ? u : reducer.apply(r, u);
5387 }
5388 result = r;
5389 CountedCompleter<?> c;
     // merge results of completed subtasks up the completion chain
5390 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5391 @SuppressWarnings("unchecked") MapReduceValuesTask<K,V,U>
5392 t = (MapReduceValuesTask<K,V,U>)c,
5393 s = t.rights;
5394 while (s != null) {
5395 U tr, sr;
5396 if ((sr = s.result) != null)
5397 t.result = (((tr = t.result) == null) ? sr :
5398 reducer.apply(tr, sr));
5399 s = t.rights = s.nextRight;
5400 }
5401 }
5402 }
5403 }
5404 }
5405
     // Parallel map-reduce over entries; same structure as
     // MapReduceKeysTask with transformer applied to the Node itself.
5406 @SuppressWarnings("serial")
5407 static final class MapReduceEntriesTask<K,V,U>
5408 extends BulkTask<K,V,U> {
5409 final Fun<Map.Entry<K,V>, ? extends U> transformer;
5410 final BiFun<? super U, ? super U, ? extends U> reducer;
5411 U result;
5412 MapReduceEntriesTask<K,V,U> rights, nextRight;
5413 MapReduceEntriesTask
5414 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5415 MapReduceEntriesTask<K,V,U> nextRight,
5416 Fun<Map.Entry<K,V>, ? extends U> transformer,
5417 BiFun<? super U, ? super U, ? extends U> reducer) {
5418 super(p, b, i, f, t); this.nextRight = nextRight;
5419 this.transformer = transformer;
5420 this.reducer = reducer;
5421 }
5422 public final U getRawResult() { return result; }
5423 public final void compute() {
5424 final Fun<Map.Entry<K,V>, ? extends U> transformer;
5425 final BiFun<? super U, ? super U, ? extends U> reducer;
5426 if ((transformer = this.transformer) != null &&
5427 (reducer = this.reducer) != null) {
     // split loop: side effects batch >>>= 1 and baseLimit = h are intended
5428 for (int i = baseIndex, f, h; batch > 0 &&
5429 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5430 addToPendingCount(1);
5431 (rights = new MapReduceEntriesTask<K,V,U>
5432 (this, batch >>>= 1, baseLimit = h, f, tab,
5433 rights, transformer, reducer)).fork();
5434 }
5435 U r = null;
5436 for (Node<K,V> p; (p = advance()) != null; ) {
5437 U u;
5438 if ((u = transformer.apply(p)) != null)
5439 r = (r == null) ? u : reducer.apply(r, u);
5440 }
5441 result = r;
5442 CountedCompleter<?> c;
     // merge results of completed subtasks up the completion chain
5443 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5444 @SuppressWarnings("unchecked") MapReduceEntriesTask<K,V,U>
5445 t = (MapReduceEntriesTask<K,V,U>)c,
5446 s = t.rights;
5447 while (s != null) {
5448 U tr, sr;
5449 if ((sr = s.result) != null)
5450 t.result = (((tr = t.result) == null) ? sr :
5451 reducer.apply(tr, sr));
5452 s = t.rights = s.nextRight;
5453 }
5454 }
5455 }
5456 }
5457 }
5458
     // Parallel map-reduce over (key, value) pairs; same structure as
     // MapReduceKeysTask with a two-argument transformer.
5459 @SuppressWarnings("serial")
5460 static final class MapReduceMappingsTask<K,V,U>
5461 extends BulkTask<K,V,U> {
5462 final BiFun<? super K, ? super V, ? extends U> transformer;
5463 final BiFun<? super U, ? super U, ? extends U> reducer;
5464 U result;
5465 MapReduceMappingsTask<K,V,U> rights, nextRight;
5466 MapReduceMappingsTask
5467 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5468 MapReduceMappingsTask<K,V,U> nextRight,
5469 BiFun<? super K, ? super V, ? extends U> transformer,
5470 BiFun<? super U, ? super U, ? extends U> reducer) {
5471 super(p, b, i, f, t); this.nextRight = nextRight;
5472 this.transformer = transformer;
5473 this.reducer = reducer;
5474 }
5475 public final U getRawResult() { return result; }
5476 public final void compute() {
5477 final BiFun<? super K, ? super V, ? extends U> transformer;
5478 final BiFun<? super U, ? super U, ? extends U> reducer;
5479 if ((transformer = this.transformer) != null &&
5480 (reducer = this.reducer) != null) {
     // split loop: side effects batch >>>= 1 and baseLimit = h are intended
5481 for (int i = baseIndex, f, h; batch > 0 &&
5482 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5483 addToPendingCount(1);
5484 (rights = new MapReduceMappingsTask<K,V,U>
5485 (this, batch >>>= 1, baseLimit = h, f, tab,
5486 rights, transformer, reducer)).fork();
5487 }
5488 U r = null;
5489 for (Node<K,V> p; (p = advance()) != null; ) {
5490 U u;
5491 if ((u = transformer.apply(p.key, p.val)) != null)
5492 r = (r == null) ? u : reducer.apply(r, u);
5493 }
5494 result = r;
5495 CountedCompleter<?> c;
     // merge results of completed subtasks up the completion chain
5496 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5497 @SuppressWarnings("unchecked") MapReduceMappingsTask<K,V,U>
5498 t = (MapReduceMappingsTask<K,V,U>)c,
5499 s = t.rights;
5500 while (s != null) {
5501 U tr, sr;
5502 if ((sr = s.result) != null)
5503 t.result = (((tr = t.result) == null) ? sr :
5504 reducer.apply(tr, sr));
5505 s = t.rights = s.nextRight;
5506 }
5507 }
5508 }
5509 }
5510 }
5511
     // Parallel map-reduce of keys to a primitive double: 'basis' is the
     // identity value seeded into every (sub)task; reducer folds the
     // transformed keys into it. Merge via rights chain as in ReduceKeysTask
     // (no null-skipping here — the result is an unboxed double).
5512 @SuppressWarnings("serial")
5513 static final class MapReduceKeysToDoubleTask<K,V>
5514 extends BulkTask<K,V,Double> {
5515 final ObjectToDouble<? super K> transformer;
5516 final DoubleByDoubleToDouble reducer;
5517 final double basis;
5518 double result;
5519 MapReduceKeysToDoubleTask<K,V> rights, nextRight;
5520 MapReduceKeysToDoubleTask
5521 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5522 MapReduceKeysToDoubleTask<K,V> nextRight,
5523 ObjectToDouble<? super K> transformer,
5524 double basis,
5525 DoubleByDoubleToDouble reducer) {
5526 super(p, b, i, f, t); this.nextRight = nextRight;
5527 this.transformer = transformer;
5528 this.basis = basis; this.reducer = reducer;
5529 }
5530 public final Double getRawResult() { return result; }
5531 public final void compute() {
5532 final ObjectToDouble<? super K> transformer;
5533 final DoubleByDoubleToDouble reducer;
5534 if ((transformer = this.transformer) != null &&
5535 (reducer = this.reducer) != null) {
5536 double r = this.basis;
     // split loop: side effects batch >>>= 1 and baseLimit = h are intended;
     // each forked subtask starts from the same basis r
5537 for (int i = baseIndex, f, h; batch > 0 &&
5538 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5539 addToPendingCount(1);
5540 (rights = new MapReduceKeysToDoubleTask<K,V>
5541 (this, batch >>>= 1, baseLimit = h, f, tab,
5542 rights, transformer, r, reducer)).fork();
5543 }
5544 for (Node<K,V> p; (p = advance()) != null; )
5545 r = reducer.apply(r, transformer.apply(p.key));
5546 result = r;
5547 CountedCompleter<?> c;
     // merge results of completed subtasks up the completion chain
5548 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5549 @SuppressWarnings("unchecked") MapReduceKeysToDoubleTask<K,V>
5550 t = (MapReduceKeysToDoubleTask<K,V>)c,
5551 s = t.rights;
5552 while (s != null) {
5553 t.result = reducer.apply(t.result, s.result);
5554 s = t.rights = s.nextRight;
5555 }
5556 }
5557 }
5558 }
5559 }
5560
     // Parallel map-reduce of values to a primitive double; same structure
     // as MapReduceKeysToDoubleTask with transformer applied to p.val.
5561 @SuppressWarnings("serial")
5562 static final class MapReduceValuesToDoubleTask<K,V>
5563 extends BulkTask<K,V,Double> {
5564 final ObjectToDouble<? super V> transformer;
5565 final DoubleByDoubleToDouble reducer;
5566 final double basis;
5567 double result;
5568 MapReduceValuesToDoubleTask<K,V> rights, nextRight;
5569 MapReduceValuesToDoubleTask
5570 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5571 MapReduceValuesToDoubleTask<K,V> nextRight,
5572 ObjectToDouble<? super V> transformer,
5573 double basis,
5574 DoubleByDoubleToDouble reducer) {
5575 super(p, b, i, f, t); this.nextRight = nextRight;
5576 this.transformer = transformer;
5577 this.basis = basis; this.reducer = reducer;
5578 }
5579 public final Double getRawResult() { return result; }
5580 public final void compute() {
5581 final ObjectToDouble<? super V> transformer;
5582 final DoubleByDoubleToDouble reducer;
5583 if ((transformer = this.transformer) != null &&
5584 (reducer = this.reducer) != null) {
5585 double r = this.basis;
     // split loop: side effects batch >>>= 1 and baseLimit = h are intended;
     // each forked subtask starts from the same basis r
5586 for (int i = baseIndex, f, h; batch > 0 &&
5587 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5588 addToPendingCount(1);
5589 (rights = new MapReduceValuesToDoubleTask<K,V>
5590 (this, batch >>>= 1, baseLimit = h, f, tab,
5591 rights, transformer, r, reducer)).fork();
5592 }
5593 for (Node<K,V> p; (p = advance()) != null; )
5594 r = reducer.apply(r, transformer.apply(p.val));
5595 result = r;
5596 CountedCompleter<?> c;
     // merge results of completed subtasks up the completion chain
5597 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5598 @SuppressWarnings("unchecked") MapReduceValuesToDoubleTask<K,V>
5599 t = (MapReduceValuesToDoubleTask<K,V>)c,
5600 s = t.rights;
5601 while (s != null) {
5602 t.result = reducer.apply(t.result, s.result);
5603 s = t.rights = s.nextRight;
5604 }
5605 }
5606 }
5607 }
5608 }
5609
    /**
     * ForkJoin task that maps each entry (the Node itself, which
     * implements Map.Entry) to a {@code double} via {@code transformer}
     * and combines the mapped values with {@code reducer}, starting
     * from the identity {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<Map.Entry<K,V>> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;
        double result;
        // Forked right-hand subtasks, linked for the completion pass.
        MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
        MapReduceEntriesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToDoubleTask<K,V> nextRight,
             ObjectToDouble<Map.Entry<K,V>> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectToDouble<Map.Entry<K,V>> transformer;
            final DoubleByDoubleToDouble reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Split off right halves of the remaining range as
                // forked subtasks until the batch budget is used up.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own remaining (leftmost) range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p));
                result = r;
                // Fold each completed task's subtask results into it.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesToDoubleTask<K,V>
                        t = (MapReduceEntriesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5658
    /**
     * ForkJoin task that maps each (key, value) pair to a {@code double}
     * via {@code transformer} and combines the mapped values with
     * {@code reducer}, starting from the identity {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectByObjectToDouble<? super K, ? super V> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;
        double result;
        // Forked right-hand subtasks, linked for the completion pass.
        MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
        MapReduceMappingsToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToDoubleTask<K,V> nextRight,
             ObjectByObjectToDouble<? super K, ? super V> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectByObjectToDouble<? super K, ? super V> transformer;
            final DoubleByDoubleToDouble reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Split off right halves of the remaining range as
                // forked subtasks until the batch budget is used up.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own remaining (leftmost) range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key, p.val));
                result = r;
                // Fold each completed task's subtask results into it.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsToDoubleTask<K,V>
                        t = (MapReduceMappingsToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5707
    /**
     * ForkJoin task that maps each key to a {@code long} via
     * {@code transformer} and combines the mapped values with
     * {@code reducer}, starting from the identity {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<? super K> transformer;
        final LongByLongToLong reducer;
        final long basis;
        long result;
        // Forked right-hand subtasks, linked for the completion pass.
        MapReduceKeysToLongTask<K,V> rights, nextRight;
        MapReduceKeysToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToLongTask<K,V> nextRight,
             ObjectToLong<? super K> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectToLong<? super K> transformer;
            final LongByLongToLong reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Split off right halves of the remaining range as
                // forked subtasks until the batch budget is used up.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own remaining (leftmost) range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key));
                result = r;
                // Fold each completed task's subtask results into it.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysToLongTask<K,V>
                        t = (MapReduceKeysToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5756
    /**
     * ForkJoin task that maps each value to a {@code long} via
     * {@code transformer} and combines the mapped values with
     * {@code reducer}, starting from the identity {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<? super V> transformer;
        final LongByLongToLong reducer;
        final long basis;
        long result;
        // Forked right-hand subtasks, linked for the completion pass.
        MapReduceValuesToLongTask<K,V> rights, nextRight;
        MapReduceValuesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToLongTask<K,V> nextRight,
             ObjectToLong<? super V> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectToLong<? super V> transformer;
            final LongByLongToLong reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Split off right halves of the remaining range as
                // forked subtasks until the batch budget is used up.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own remaining (leftmost) range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.val));
                result = r;
                // Fold each completed task's subtask results into it.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesToLongTask<K,V>
                        t = (MapReduceValuesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5805
    /**
     * ForkJoin task that maps each entry (the Node itself, which
     * implements Map.Entry) to a {@code long} via {@code transformer}
     * and combines the mapped values with {@code reducer}, starting
     * from the identity {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<Map.Entry<K,V>> transformer;
        final LongByLongToLong reducer;
        final long basis;
        long result;
        // Forked right-hand subtasks, linked for the completion pass.
        MapReduceEntriesToLongTask<K,V> rights, nextRight;
        MapReduceEntriesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToLongTask<K,V> nextRight,
             ObjectToLong<Map.Entry<K,V>> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectToLong<Map.Entry<K,V>> transformer;
            final LongByLongToLong reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Split off right halves of the remaining range as
                // forked subtasks until the batch budget is used up.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own remaining (leftmost) range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p));
                result = r;
                // Fold each completed task's subtask results into it.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesToLongTask<K,V>
                        t = (MapReduceEntriesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5854
    /**
     * ForkJoin task that maps each (key, value) pair to a {@code long}
     * via {@code transformer} and combines the mapped values with
     * {@code reducer}, starting from the identity {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectByObjectToLong<? super K, ? super V> transformer;
        final LongByLongToLong reducer;
        final long basis;
        long result;
        // Forked right-hand subtasks, linked for the completion pass.
        MapReduceMappingsToLongTask<K,V> rights, nextRight;
        MapReduceMappingsToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToLongTask<K,V> nextRight,
             ObjectByObjectToLong<? super K, ? super V> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectByObjectToLong<? super K, ? super V> transformer;
            final LongByLongToLong reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Split off right halves of the remaining range as
                // forked subtasks until the batch budget is used up.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own remaining (leftmost) range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key, p.val));
                result = r;
                // Fold each completed task's subtask results into it.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsToLongTask<K,V>
                        t = (MapReduceMappingsToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5903
    /**
     * ForkJoin task that maps each key to an {@code int} via
     * {@code transformer} and combines the mapped values with
     * {@code reducer}, starting from the identity {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<? super K> transformer;
        final IntByIntToInt reducer;
        final int basis;
        int result;
        // Forked right-hand subtasks, linked for the completion pass.
        MapReduceKeysToIntTask<K,V> rights, nextRight;
        MapReduceKeysToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToIntTask<K,V> nextRight,
             ObjectToInt<? super K> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectToInt<? super K> transformer;
            final IntByIntToInt reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Split off right halves of the remaining range as
                // forked subtasks until the batch budget is used up.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own remaining (leftmost) range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key));
                result = r;
                // Fold each completed task's subtask results into it.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysToIntTask<K,V>
                        t = (MapReduceKeysToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5952
    /**
     * ForkJoin task that maps each value to an {@code int} via
     * {@code transformer} and combines the mapped values with
     * {@code reducer}, starting from the identity {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<? super V> transformer;
        final IntByIntToInt reducer;
        final int basis;
        int result;
        // Forked right-hand subtasks, linked for the completion pass.
        MapReduceValuesToIntTask<K,V> rights, nextRight;
        MapReduceValuesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToIntTask<K,V> nextRight,
             ObjectToInt<? super V> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectToInt<? super V> transformer;
            final IntByIntToInt reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Split off right halves of the remaining range as
                // forked subtasks until the batch budget is used up.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own remaining (leftmost) range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.val));
                result = r;
                // Fold each completed task's subtask results into it.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesToIntTask<K,V>
                        t = (MapReduceValuesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6001
    /**
     * ForkJoin task that maps each entry (the Node itself, which
     * implements Map.Entry) to an {@code int} via {@code transformer}
     * and combines the mapped values with {@code reducer}, starting
     * from the identity {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<Map.Entry<K,V>> transformer;
        final IntByIntToInt reducer;
        final int basis;
        int result;
        // Forked right-hand subtasks, linked for the completion pass.
        MapReduceEntriesToIntTask<K,V> rights, nextRight;
        MapReduceEntriesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToIntTask<K,V> nextRight,
             ObjectToInt<Map.Entry<K,V>> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectToInt<Map.Entry<K,V>> transformer;
            final IntByIntToInt reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Split off right halves of the remaining range as
                // forked subtasks until the batch budget is used up.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own remaining (leftmost) range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p));
                result = r;
                // Fold each completed task's subtask results into it.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesToIntTask<K,V>
                        t = (MapReduceEntriesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6050
    /**
     * ForkJoin task that maps each (key, value) pair to an {@code int}
     * via {@code transformer} and combines the mapped values with
     * {@code reducer}, starting from the identity {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectByObjectToInt<? super K, ? super V> transformer;
        final IntByIntToInt reducer;
        final int basis;
        int result;
        // Forked right-hand subtasks, linked for the completion pass.
        MapReduceMappingsToIntTask<K,V> rights, nextRight;
        MapReduceMappingsToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToIntTask<K,V> nextRight,
             ObjectByObjectToInt<? super K, ? super V> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectByObjectToInt<? super K, ? super V> transformer;
            final IntByIntToInt reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Split off right halves of the remaining range as
                // forked subtasks until the batch budget is used up.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own remaining (leftmost) range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key, p.val));
                result = r;
                // Fold each completed task's subtask results into it.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsToIntTask<K,V>
                        t = (MapReduceMappingsToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6099
6100 /* ---------------- Counters -------------- */
6101
6102 // Adapted from LongAdder and Striped64.
6103 // See their internal docs for explanation.
6104
    // A padded cell for distributing counts
    static final class CounterCell {
        // p0..p6 and q0..q6 pad {@code value} on both sides so that
        // cells in adjacent array slots land on different cache lines
        // (adapted from Striped64; see its internal docs).
        volatile long p0, p1, p2, p3, p4, p5, p6;
        volatile long value;  // this cell's running count
        volatile long q0, q1, q2, q3, q4, q5, q6;
        CounterCell(long x) { value = x; }
    }
6112
    /**
     * Holder for the thread-local hash code determining which
     * CounterCell to use. The code is initialized via the
     * counterHashCodeGenerator, but may be moved upon collisions.
     */
    static final class CounterHashCode {
        int code;  // probe value; rehashed in fullAddCount on contention
    }
6121
    /**
     * Generates initial value for per-thread CounterHashCodes.
     * Seeds are spaced by SEED_INCREMENT to spread probes across cells.
     */
    static final AtomicInteger counterHashCodeGenerator = new AtomicInteger();

    /**
     * Increment for counterHashCodeGenerator. See class ThreadLocal
     * for explanation. (The same golden-ratio-derived constant used
     * for ThreadLocal hash spreading.)
     */
    static final int SEED_INCREMENT = 0x61c88647;

    /**
     * Per-thread counter hash codes. Shared across all instances.
     */
    static final ThreadLocal<CounterHashCode> threadCounterHashCode =
        new ThreadLocal<CounterHashCode>();
6138
6139
6140 final long sumCount() {
6141 CounterCell[] as = counterCells; CounterCell a;
6142 long sum = baseCount;
6143 if (as != null) {
6144 for (int i = 0; i < as.length; ++i) {
6145 if ((a = as[i]) != null)
6146 sum += a.value;
6147 }
6148 }
6149 return sum;
6150 }
6151
    // See LongAdder version for explanation
    /**
     * Slow-path counter update (adapted from Striped64/LongAdder):
     * retries adding {@code x} into a CounterCell chosen by the
     * thread's probe hash, creating, initializing, or doubling the
     * counterCells table under the cellsBusy spinlock as needed,
     * and falling back to a CAS on baseCount when the table cannot
     * be used.
     *
     * @param x the amount to add
     * @param hc the caller's thread-local hash holder, or null to create one
     * @param wasUncontended false if the caller's initial cell CAS already failed
     */
    private final void fullAddCount(long x, CounterHashCode hc,
                                    boolean wasUncontended) {
        int h;
        if (hc == null) {
            // First use by this thread: create and register a probe hash.
            hc = new CounterHashCode();
            int s = counterHashCodeGenerator.addAndGet(SEED_INCREMENT);
            h = hc.code = (s == 0) ? 1 : s; // Avoid zero
            threadCounterHashCode.set(hc);
        }
        else
            h = hc.code;
        boolean collide = false;                // True if last slot nonempty
        for (;;) {
            CounterCell[] as; CounterCell a; int n; long v;
            if ((as = counterCells) != null && (n = as.length) > 0) {
                if ((a = as[(n - 1) & h]) == null) {
                    if (cellsBusy == 0) {            // Try to attach new Cell
                        CounterCell r = new CounterCell(x); // Optimistic create
                        if (cellsBusy == 0 &&
                            U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                            boolean created = false;
                            try {               // Recheck under lock
                                CounterCell[] rs; int m, j;
                                if ((rs = counterCells) != null &&
                                    (m = rs.length) > 0 &&
                                    rs[j = (m - 1) & h] == null) {
                                    rs[j] = r;
                                    created = true;
                                }
                            } finally {
                                cellsBusy = 0;  // Release spinlock
                            }
                            if (created)
                                break;
                            continue;           // Slot is now non-empty
                        }
                    }
                    collide = false;
                }
                else if (!wasUncontended)       // CAS already known to fail
                    wasUncontended = true;      // Continue after rehash
                else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
                    break;                      // Added into existing cell
                else if (counterCells != as || n >= NCPU)
                    collide = false;            // At max size or stale
                else if (!collide)
                    collide = true;             // Note contention; expand next time
                else if (cellsBusy == 0 &&
                         U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                    try {
                        if (counterCells == as) {// Expand table unless stale
                            CounterCell[] rs = new CounterCell[n << 1];
                            for (int i = 0; i < n; ++i)
                                rs[i] = as[i];
                            counterCells = rs;
                        }
                    } finally {
                        cellsBusy = 0;
                    }
                    collide = false;
                    continue;                   // Retry with expanded table
                }
                // XorShift rehash to find a different cell next iteration.
                h ^= h << 13;                   // Rehash
                h ^= h >>> 17;
                h ^= h << 5;
            }
            else if (cellsBusy == 0 && counterCells == as &&
                     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                boolean init = false;
                try {                           // Initialize table
                    if (counterCells == as) {
                        CounterCell[] rs = new CounterCell[2];
                        rs[h & 1] = new CounterCell(x);
                        counterCells = rs;
                        init = true;
                    }
                } finally {
                    cellsBusy = 0;
                }
                if (init)
                    break;
            }
            else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
                break;                          // Fall back on using base
        }
        hc.code = h;                            // Record index for next time
    }
6240
    // Unsafe mechanics
    private static final sun.misc.Unsafe U;        // provider of CAS/volatile primitives
    private static final long SIZECTL;             // field offset of sizeCtl
    private static final long TRANSFERINDEX;       // field offset of transferIndex
    private static final long BASECOUNT;           // field offset of baseCount
    private static final long CELLSBUSY;           // field offset of cellsBusy spinlock
    private static final long CELLVALUE;           // field offset of CounterCell.value
    private static final long ABASE;               // base offset of Node[] element storage
    private static final int ASHIFT;               // log2 of Node[] element scale, for indexing
6250
    static {
        try {
            U = getUnsafe();
            Class<?> k = ConcurrentHashMapV8.class;
            // Resolve field offsets used for CAS/volatile access.
            SIZECTL = U.objectFieldOffset
                (k.getDeclaredField("sizeCtl"));
            TRANSFERINDEX = U.objectFieldOffset
                (k.getDeclaredField("transferIndex"));
            BASECOUNT = U.objectFieldOffset
                (k.getDeclaredField("baseCount"));
            CELLSBUSY = U.objectFieldOffset
                (k.getDeclaredField("cellsBusy"));
            Class<?> ck = CounterCell.class;
            CELLVALUE = U.objectFieldOffset
                (ck.getDeclaredField("value"));
            Class<?> ak = Node[].class;
            ABASE = U.arrayBaseOffset(ak);
            int scale = U.arrayIndexScale(ak);
            // Element scale must be a power of two so index -> byte
            // offset can be computed with a shift.
            if ((scale & (scale - 1)) != 0)
                throw new Error("data type scale not a power of two");
            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
        } catch (Exception e) {
            // Unusable without Unsafe; fail class initialization.
            throw new Error(e);
        }
    }
6276
    /**
     * Returns a sun.misc.Unsafe.  Suitable for use in a 3rd party package.
     * Replace with a simple call to Unsafe.getUnsafe when integrating
     * into a jdk.
     *
     * @return a sun.misc.Unsafe
     */
    private static sun.misc.Unsafe getUnsafe() {
        try {
            return sun.misc.Unsafe.getUnsafe();
        } catch (SecurityException tryReflectionInstead) {} // not on boot classpath
        try {
            // Fallback when a security manager denies getUnsafe():
            // scan Unsafe's own static fields for an instance.
            return java.security.AccessController.doPrivileged
            (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
                public sun.misc.Unsafe run() throws Exception {
                    Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
                    for (java.lang.reflect.Field f : k.getDeclaredFields()) {
                        f.setAccessible(true);
                        Object x = f.get(null);
                        if (k.isInstance(x))
                            return k.cast(x);
                    }
                    throw new NoSuchFieldError("the Unsafe");
                }});
        } catch (java.security.PrivilegedActionException e) {
            // Preserve the underlying cause for diagnosis.
            throw new RuntimeException("Could not initialize intrinsics",
                                       e.getCause());
        }
    }
6306 }