ViewVC Help
View File | Revision Log | Show Annotations | Download File | Root Listing
root/jsr166/jsr166/src/jsr166e/ConcurrentHashMapV8.java
Revision: 1.116
Committed: Wed Sep 11 14:53:38 2013 UTC (10 years, 7 months ago) by dl
Branch: MAIN
Changes since 1.115: +110 -72 lines
Log Message:
Apply jdk8 traversal improvements

File Contents

# Content
1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/publicdomain/zero/1.0/
5 */
6
7 package jsr166e;
8
9 import jsr166e.ForkJoinPool;
10
11 import java.io.ObjectStreamField;
12 import java.io.Serializable;
13 import java.lang.reflect.ParameterizedType;
14 import java.lang.reflect.Type;
15 import java.util.AbstractMap;
16 import java.util.Arrays;
17 import java.util.Collection;
18 import java.util.Comparator;
19 import java.util.ConcurrentModificationException;
20 import java.util.Enumeration;
21 import java.util.HashMap;
22 import java.util.Hashtable;
23 import java.util.Iterator;
24 import java.util.Map;
25 import java.util.NoSuchElementException;
26 import java.util.Set;
27 import java.util.concurrent.ConcurrentMap;
28 import java.util.concurrent.atomic.AtomicReference;
29 import java.util.concurrent.atomic.AtomicInteger;
30 import java.util.concurrent.locks.LockSupport;
31 import java.util.concurrent.locks.ReentrantLock;
32
33 /**
34 * A hash table supporting full concurrency of retrievals and
35 * high expected concurrency for updates. This class obeys the
36 * same functional specification as {@link java.util.Hashtable}, and
37 * includes versions of methods corresponding to each method of
38 * {@code Hashtable}. However, even though all operations are
39 * thread-safe, retrieval operations do <em>not</em> entail locking,
40 * and there is <em>not</em> any support for locking the entire table
41 * in a way that prevents all access. This class is fully
42 * interoperable with {@code Hashtable} in programs that rely on its
43 * thread safety but not on its synchronization details.
44 *
45 * <p>Retrieval operations (including {@code get}) generally do not
46 * block, so may overlap with update operations (including {@code put}
47 * and {@code remove}). Retrievals reflect the results of the most
48 * recently <em>completed</em> update operations holding upon their
49 * onset. (More formally, an update operation for a given key bears a
50 * <em>happens-before</em> relation with any (non-null) retrieval for
51 * that key reporting the updated value.) For aggregate operations
52 * such as {@code putAll} and {@code clear}, concurrent retrievals may
53 * reflect insertion or removal of only some entries. Similarly,
54 * Iterators and Enumerations return elements reflecting the state of
55 * the hash table at some point at or since the creation of the
56 * iterator/enumeration. They do <em>not</em> throw {@link
57 * ConcurrentModificationException}. However, iterators are designed
58 * to be used by only one thread at a time. Bear in mind that the
59 * results of aggregate status methods including {@code size}, {@code
60 * isEmpty}, and {@code containsValue} are typically useful only when
61 * a map is not undergoing concurrent updates in other threads.
62 * Otherwise the results of these methods reflect transient states
63 * that may be adequate for monitoring or estimation purposes, but not
64 * for program control.
65 *
66 * <p>The table is dynamically expanded when there are too many
67 * collisions (i.e., keys that have distinct hash codes but fall into
68 * the same slot modulo the table size), with the expected average
69 * effect of maintaining roughly two bins per mapping (corresponding
70 * to a 0.75 load factor threshold for resizing). There may be much
71 * variance around this average as mappings are added and removed, but
72 * overall, this maintains a commonly accepted time/space tradeoff for
73 * hash tables. However, resizing this or any other kind of hash
74 * table may be a relatively slow operation. When possible, it is a
75 * good idea to provide a size estimate as an optional {@code
76 * initialCapacity} constructor argument. An additional optional
77 * {@code loadFactor} constructor argument provides a further means of
78 * customizing initial table capacity by specifying the table density
79 * to be used in calculating the amount of space to allocate for the
80 * given number of elements. Also, for compatibility with previous
81 * versions of this class, constructors may optionally specify an
82 * expected {@code concurrencyLevel} as an additional hint for
83 * internal sizing. Note that using many keys with exactly the same
84 * {@code hashCode()} is a sure way to slow down performance of any
85 * hash table. To ameliorate impact, when keys are {@link Comparable},
86 * this class may use comparison order among keys to help break ties.
87 *
88 * <p>A {@link Set} projection of a ConcurrentHashMapV8 may be created
89 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
90 * (using {@link #keySet(Object)}) when only keys are of interest, and the
91 * mapped values are (perhaps transiently) not used or all take the
92 * same mapping value.
93 *
94 * <p>This class and its views and iterators implement all of the
95 * <em>optional</em> methods of the {@link Map} and {@link Iterator}
96 * interfaces.
97 *
98 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
99 * does <em>not</em> allow {@code null} to be used as a key or value.
100 *
101 * <p>ConcurrentHashMapV8s support a set of sequential and parallel bulk
102 * operations that are designed
103 * to be safely, and often sensibly, applied even with maps that are
104 * being concurrently updated by other threads; for example, when
105 * computing a snapshot summary of the values in a shared registry.
106 * There are three kinds of operation, each with four forms, accepting
107 * functions with Keys, Values, Entries, and (Key, Value) arguments
108 * and/or return values. Because the elements of a ConcurrentHashMapV8
109 * are not ordered in any particular way, and may be processed in
110 * different orders in different parallel executions, the correctness
111 * of supplied functions should not depend on any ordering, or on any
112 * other objects or values that may transiently change while
113 * computation is in progress; and except for forEach actions, should
114 * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry}
115 * objects do not support method {@code setValue}.
116 *
117 * <ul>
118 * <li> forEach: Perform a given action on each element.
119 * A variant form applies a given transformation on each element
120 * before performing the action.</li>
121 *
122 * <li> search: Return the first available non-null result of
123 * applying a given function on each element; skipping further
124 * search when a result is found.</li>
125 *
126 * <li> reduce: Accumulate each element. The supplied reduction
127 * function cannot rely on ordering (more formally, it should be
128 * both associative and commutative). There are five variants:
129 *
130 * <ul>
131 *
132 * <li> Plain reductions. (There is not a form of this method for
133 * (key, value) function arguments since there is no corresponding
134 * return type.)</li>
135 *
136 * <li> Mapped reductions that accumulate the results of a given
137 * function applied to each element.</li>
138 *
139 * <li> Reductions to scalar doubles, longs, and ints, using a
140 * given basis value.</li>
141 *
142 * </ul>
143 * </li>
144 * </ul>
145 *
146 * <p>These bulk operations accept a {@code parallelismThreshold}
147 * argument. Methods proceed sequentially if the current map size is
148 * estimated to be less than the given threshold. Using a value of
149 * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value
150 * of {@code 1} results in maximal parallelism by partitioning into
151 * enough subtasks to fully utilize the {@link
152 * ForkJoinPool#commonPool()} that is used for all parallel
153 * computations. Normally, you would initially choose one of these
154 * extreme values, and then measure performance of using in-between
155 * values that trade off overhead versus throughput.
156 *
157 * <p>The concurrency properties of bulk operations follow
158 * from those of ConcurrentHashMapV8: Any non-null result returned
159 * from {@code get(key)} and related access methods bears a
160 * happens-before relation with the associated insertion or
161 * update. The result of any bulk operation reflects the
162 * composition of these per-element relations (but is not
163 * necessarily atomic with respect to the map as a whole unless it
164 * is somehow known to be quiescent). Conversely, because keys
165 * and values in the map are never null, null serves as a reliable
166 * atomic indicator of the current lack of any result. To
167 * maintain this property, null serves as an implicit basis for
168 * all non-scalar reduction operations. For the double, long, and
169 * int versions, the basis should be one that, when combined with
170 * any other value, returns that other value (more formally, it
171 * should be the identity element for the reduction). Most common
172 * reductions have these properties; for example, computing a sum
173 * with basis 0 or a minimum with basis MAX_VALUE.
174 *
175 * <p>Search and transformation functions provided as arguments
176 * should similarly return null to indicate the lack of any result
177 * (in which case it is not used). In the case of mapped
178 * reductions, this also enables transformations to serve as
179 * filters, returning null (or, in the case of primitive
180 * specializations, the identity basis) if the element should not
181 * be combined. You can create compound transformations and
182 * filterings by composing them yourself under this "null means
183 * there is nothing there now" rule before using them in search or
184 * reduce operations.
185 *
186 * <p>Methods accepting and/or returning Entry arguments maintain
187 * key-value associations. They may be useful for example when
188 * finding the key for the greatest value. Note that "plain" Entry
189 * arguments can be supplied using {@code new
190 * AbstractMap.SimpleEntry(k,v)}.
191 *
192 * <p>Bulk operations may complete abruptly, throwing an
193 * exception encountered in the application of a supplied
194 * function. Bear in mind when handling such exceptions that other
195 * concurrently executing functions could also have thrown
196 * exceptions, or would have done so if the first exception had
197 * not occurred.
198 *
199 * <p>Speedups for parallel compared to sequential forms are common
200 * but not guaranteed. Parallel operations involving brief functions
201 * on small maps may execute more slowly than sequential forms if the
202 * underlying work to parallelize the computation is more expensive
203 * than the computation itself. Similarly, parallelization may not
204 * lead to much actual parallelism if all processors are busy
205 * performing unrelated tasks.
206 *
207 * <p>All arguments to all task methods must be non-null.
208 *
209 * <p><em>jsr166e note: During transition, this class
210 * uses nested functional interfaces with different names but the
211 * same forms as those expected for JDK8.</em>
212 *
213 * <p>This class is a member of the
214 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
215 * Java Collections Framework</a>.
216 *
217 * @since 1.5
218 * @author Doug Lea
219 * @param <K> the type of keys maintained by this map
220 * @param <V> the type of mapped values
221 */
public class ConcurrentHashMapV8<K,V> extends AbstractMap<K,V>
    implements ConcurrentMap<K,V>, Serializable {
    // Fixed UID: preserves serialization compatibility with previous
    // versions of this class (see "Maintaining API and serialization
    // compatibility" in the overview comment below).
    private static final long serialVersionUID = 7249069246763182397L;
225
    /**
     * An object for traversing and partitioning elements of a source.
     * This interface provides a subset of the functionality of JDK8
     * java.util.Spliterator. (A full Spliterator cannot be used here
     * because this backport must compile on pre-JDK8 platforms.)
     */
    public static interface ConcurrentHashMapSpliterator<T> {
        /**
         * If possible, returns a new spliterator covering
         * approximately one half of the elements, which will not be
         * covered by this spliterator. Returns null if cannot be
         * split.
         */
        ConcurrentHashMapSpliterator<T> trySplit();
        /**
         * Returns an estimate of the number of elements covered by
         * this Spliterator. The estimate may be approximate under
         * concurrent modification.
         */
        long estimateSize();

        /** Applies the action to each untraversed element */
        void forEachRemaining(Action<? super T> action);
        /** If an element remains, applies the action and returns true. */
        boolean tryAdvance(Action<? super T> action);
    }
250
    // Sams: single-abstract-method interfaces standing in for the JDK8
    // java.util.function types, since this backport targets pre-8 JDKs
    // (see the jsr166e note in the class javadoc).
    /** Interface describing a void action of one argument */
    public interface Action<A> { void apply(A a); }
    /** Interface describing a void action of two arguments */
    public interface BiAction<A,B> { void apply(A a, B b); }
    /** Interface describing a function of one argument */
    public interface Fun<A,T> { T apply(A a); }
    /** Interface describing a function of two arguments */
    public interface BiFun<A,B,T> { T apply(A a, B b); }
    /** Interface describing a function mapping its argument to a double */
    public interface ObjectToDouble<A> { double apply(A a); }
    /** Interface describing a function mapping its argument to a long */
    public interface ObjectToLong<A> { long apply(A a); }
    /** Interface describing a function mapping its argument to an int */
    public interface ObjectToInt<A> {int apply(A a); }
    /** Interface describing a function mapping two arguments to a double */
    public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
    /** Interface describing a function mapping two arguments to a long */
    public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
    /** Interface describing a function mapping two arguments to an int */
    public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); }
    /** Interface describing a function mapping two doubles to a double */
    public interface DoubleByDoubleToDouble { double apply(double a, double b); }
    /** Interface describing a function mapping two longs to a long */
    public interface LongByLongToLong { long apply(long a, long b); }
    /** Interface describing a function mapping two ints to an int */
    public interface IntByIntToInt { int apply(int a, int b); }
278
279
280 /*
281 * Overview:
282 *
283 * The primary design goal of this hash table is to maintain
284 * concurrent readability (typically method get(), but also
285 * iterators and related methods) while minimizing update
286 * contention. Secondary goals are to keep space consumption about
287 * the same or better than java.util.HashMap, and to support high
288 * initial insertion rates on an empty table by many threads.
289 *
290 * This map usually acts as a binned (bucketed) hash table. Each
291 * key-value mapping is held in a Node. Most nodes are instances
292 * of the basic Node class with hash, key, value, and next
293 * fields. However, various subclasses exist: TreeNodes are
294 * arranged in balanced trees, not lists. TreeBins hold the roots
295 * of sets of TreeNodes. ForwardingNodes are placed at the heads
296 * of bins during resizing. ReservationNodes are used as
297 * placeholders while establishing values in computeIfAbsent and
298 * related methods. The types TreeBin, ForwardingNode, and
299 * ReservationNode do not hold normal user keys, values, or
300 * hashes, and are readily distinguishable during search etc
301 * because they have negative hash fields and null key and value
302 * fields. (These special nodes are either uncommon or transient,
303 * so the impact of carrying around some unused fields is
304 * insignificant.)
305 *
306 * The table is lazily initialized to a power-of-two size upon the
307 * first insertion. Each bin in the table normally contains a
308 * list of Nodes (most often, the list has only zero or one Node).
309 * Table accesses require volatile/atomic reads, writes, and
310 * CASes. Because there is no other way to arrange this without
311 * adding further indirections, we use intrinsics
312 * (sun.misc.Unsafe) operations.
313 *
314 * We use the top (sign) bit of Node hash fields for control
315 * purposes -- it is available anyway because of addressing
316 * constraints. Nodes with negative hash fields are specially
317 * handled or ignored in map methods.
318 *
319 * Insertion (via put or its variants) of the first node in an
320 * empty bin is performed by just CASing it to the bin. This is
321 * by far the most common case for put operations under most
322 * key/hash distributions. Other update operations (insert,
323 * delete, and replace) require locks. We do not want to waste
324 * the space required to associate a distinct lock object with
325 * each bin, so instead use the first node of a bin list itself as
326 * a lock. Locking support for these locks relies on builtin
327 * "synchronized" monitors.
328 *
329 * Using the first node of a list as a lock does not by itself
330 * suffice though: When a node is locked, any update must first
331 * validate that it is still the first node after locking it, and
332 * retry if not. Because new nodes are always appended to lists,
333 * once a node is first in a bin, it remains first until deleted
334 * or the bin becomes invalidated (upon resizing).
335 *
336 * The main disadvantage of per-bin locks is that other update
337 * operations on other nodes in a bin list protected by the same
338 * lock can stall, for example when user equals() or mapping
339 * functions take a long time. However, statistically, under
340 * random hash codes, this is not a common problem. Ideally, the
341 * frequency of nodes in bins follows a Poisson distribution
342 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
343 * parameter of about 0.5 on average, given the resizing threshold
344 * of 0.75, although with a large variance because of resizing
345 * granularity. Ignoring variance, the expected occurrences of
346 * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
347 * first values are:
348 *
349 * 0: 0.60653066
350 * 1: 0.30326533
351 * 2: 0.07581633
352 * 3: 0.01263606
353 * 4: 0.00157952
354 * 5: 0.00015795
355 * 6: 0.00001316
356 * 7: 0.00000094
357 * 8: 0.00000006
358 * more: less than 1 in ten million
359 *
360 * Lock contention probability for two threads accessing distinct
361 * elements is roughly 1 / (8 * #elements) under random hashes.
362 *
363 * Actual hash code distributions encountered in practice
364 * sometimes deviate significantly from uniform randomness. This
365 * includes the case when N > (1<<30), so some keys MUST collide.
366 * Similarly for dumb or hostile usages in which multiple keys are
367 * designed to have identical hash codes or ones that differ only
368 * in masked-out high bits. So we use a secondary strategy that
369 * applies when the number of nodes in a bin exceeds a
370 * threshold. These TreeBins use a balanced tree to hold nodes (a
371 * specialized form of red-black trees), bounding search time to
372 * O(log N). Each search step in a TreeBin is at least twice as
373 * slow as in a regular list, but given that N cannot exceed
374 * (1<<64) (before running out of addresses) this bounds search
375 * steps, lock hold times, etc, to reasonable constants (roughly
376 * 100 nodes inspected per operation worst case) so long as keys
377 * are Comparable (which is very common -- String, Long, etc).
378 * TreeBin nodes (TreeNodes) also maintain the same "next"
379 * traversal pointers as regular nodes, so can be traversed in
380 * iterators in the same way.
381 *
382 * The table is resized when occupancy exceeds a percentage
383 * threshold (nominally, 0.75, but see below). Any thread
384 * noticing an overfull bin may assist in resizing after the
385 * initiating thread allocates and sets up the replacement array.
386 * However, rather than stalling, these other threads may proceed
387 * with insertions etc. The use of TreeBins shields us from the
388 * worst case effects of overfilling while resizes are in
389 * progress. Resizing proceeds by transferring bins, one by one,
390 * from the table to the next table. However, threads claim small
391 * blocks of indices to transfer (via field transferIndex) before
392 * doing so, reducing contention. Because we are using
393 * power-of-two expansion, the elements from each bin must either
394 * stay at same index, or move with a power of two offset. We
395 * eliminate unnecessary node creation by catching cases where old
396 * nodes can be reused because their next fields won't change. On
397 * average, only about one-sixth of them need cloning when a table
398 * doubles. The nodes they replace will be garbage collectable as
399 * soon as they are no longer referenced by any reader thread that
400 * may be in the midst of concurrently traversing table. Upon
401 * transfer, the old table bin contains only a special forwarding
402 * node (with hash field "MOVED") that contains the next table as
403 * its key. On encountering a forwarding node, access and update
404 * operations restart, using the new table.
405 *
406 * Each bin transfer requires its bin lock, which can stall
407 * waiting for locks while resizing. However, because other
408 * threads can join in and help resize rather than contend for
409 * locks, average aggregate waits become shorter as resizing
410 * progresses. The transfer operation must also ensure that all
411 * accessible bins in both the old and new table are usable by any
412 * traversal. This is arranged in part by proceeding from the
413 * last bin (table.length - 1) up towards the first. Upon seeing
414 * a forwarding node, traversals (see class Traverser) arrange to
415 * move to the new table without revisiting nodes. To ensure that
416 * no intervening nodes are skipped even when moved out of order,
417 * a stack (see class TableStack) is created on first encounter of
418 * a forwarding node during a traversal, to maintain its place if
419 * later processing the current table. The need for these
420 * save/restore mechanics is relatively rare, but when one
421 * forwarding node is encountered, typically many more will be.
422 * So Traversers use a simple caching scheme to avoid creating so
423 * many new TableStack nodes. (Thanks to Peter Levart for
424 * suggesting use of a stack here.)
425 *
426 * The traversal scheme also applies to partial traversals of
427 * ranges of bins (via an alternate Traverser constructor)
428 * to support partitioned aggregate operations. Also, read-only
429 * operations give up if ever forwarded to a null table, which
430 * provides support for shutdown-style clearing, which is also not
431 * currently implemented.
432 *
433 * Lazy table initialization minimizes footprint until first use,
434 * and also avoids resizings when the first operation is from a
435 * putAll, constructor with map argument, or deserialization.
436 * These cases attempt to override the initial capacity settings,
437 * but harmlessly fail to take effect in cases of races.
438 *
439 * The element count is maintained using a specialization of
440 * LongAdder. We need to incorporate a specialization rather than
441 * just use a LongAdder in order to access implicit
442 * contention-sensing that leads to creation of multiple
443 * CounterCells. The counter mechanics avoid contention on
444 * updates but can encounter cache thrashing if read too
445 * frequently during concurrent access. To avoid reading so often,
446 * resizing under contention is attempted only upon adding to a
447 * bin already holding two or more nodes. Under uniform hash
448 * distributions, the probability of this occurring at threshold
449 * is around 13%, meaning that only about 1 in 8 puts check
450 * threshold (and after resizing, many fewer do so).
451 *
452 * TreeBins use a special form of comparison for search and
453 * related operations (which is the main reason we cannot use
454 * existing collections such as TreeMaps). TreeBins contain
455 * Comparable elements, but may contain others, as well as
456 * elements that are Comparable but not necessarily Comparable for
457 * the same T, so we cannot invoke compareTo among them. To handle
458 * this, the tree is ordered primarily by hash value, then by
459 * Comparable.compareTo order if applicable. On lookup at a node,
460 * if elements are not comparable or compare as 0 then both left
461 * and right children may need to be searched in the case of tied
462 * hash values. (This corresponds to the full list search that
463 * would be necessary if all elements were non-Comparable and had
464 * tied hashes.) On insertion, to keep a total ordering (or as
465 * close as is required here) across rebalancings, we compare
466 * classes and identityHashCodes as tie-breakers. The red-black
467 * balancing code is updated from pre-jdk-collections
468 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
469 * based in turn on Cormen, Leiserson, and Rivest "Introduction to
470 * Algorithms" (CLR).
471 *
472 * TreeBins also require an additional locking mechanism. While
473 * list traversal is always possible by readers even during
474 * updates, tree traversal is not, mainly because of tree-rotations
475 * that may change the root node and/or its linkages. TreeBins
476 * include a simple read-write lock mechanism parasitic on the
477 * main bin-synchronization strategy: Structural adjustments
478 * associated with an insertion or removal are already bin-locked
479 * (and so cannot conflict with other writers) but must wait for
480 * ongoing readers to finish. Since there can be only one such
481 * waiter, we use a simple scheme using a single "waiter" field to
482 * block writers. However, readers need never block. If the root
483 * lock is held, they proceed along the slow traversal path (via
484 * next-pointers) until the lock becomes available or the list is
485 * exhausted, whichever comes first. These cases are not fast, but
486 * maximize aggregate expected throughput.
487 *
488 * Maintaining API and serialization compatibility with previous
489 * versions of this class introduces several oddities. Mainly: We
490 * leave untouched but unused constructor arguments referring to
491 * concurrencyLevel. We accept a loadFactor constructor argument,
492 * but apply it only to initial table capacity (which is the only
493 * time that we can guarantee to honor it.) We also declare an
494 * unused "Segment" class that is instantiated in minimal form
495 * only when serializing.
496 *
497 * Also, solely for compatibility with previous versions of this
498 * class, it extends AbstractMap, even though all of its methods
499 * are overridden, so it is just useless baggage.
500 *
501 * This file is organized to make things a little easier to follow
502 * while reading than they might otherwise: First the main static
503 * declarations and utilities, then fields, then main public
504 * methods (with a few factorings of multiple public methods into
505 * internal ones), then sizing methods, trees, traversers, and
506 * bulk operations.
507 */
508
    /* ---------------- Constants -------------- */

    /**
     * The largest possible table capacity. This value must be
     * exactly 1<<30 to stay within Java array allocation and indexing
     * bounds for power of two table sizes, and is further required
     * because the top two bits of 32bit hash fields are used for
     * control purposes.
     */
    private static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The default initial table capacity. Must be a power of 2
     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
     */
    private static final int DEFAULT_CAPACITY = 16;

    /**
     * The largest possible (non-power of two) array size.
     * Needed by toArray and related methods. (The -8 slack leaves
     * room for array object headers on some VMs.)
     */
    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

    /**
     * The default concurrency level for this table. Unused but
     * defined for compatibility with previous versions of this class.
     */
    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;

    /**
     * The load factor for this table. Overrides of this value in
     * constructors affect only the initial table capacity. The
     * actual floating point value isn't normally used -- it is
     * simpler to use expressions such as {@code n - (n >>> 2)} for
     * the associated resizing threshold.
     */
    private static final float LOAD_FACTOR = 0.75f;

    /**
     * The bin count threshold for using a tree rather than list for a
     * bin. Bins are converted to trees when adding an element to a
     * bin with at least this many nodes. The value must be greater
     * than 2, and should be at least 8 to mesh with assumptions in
     * tree removal about conversion back to plain bins upon
     * shrinkage.
     */
    static final int TREEIFY_THRESHOLD = 8;

    /**
     * The bin count threshold for untreeifying a (split) bin during a
     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
     * most 6 to mesh with shrinkage detection under removal.
     */
    static final int UNTREEIFY_THRESHOLD = 6;

    /**
     * The smallest table capacity for which bins may be treeified.
     * (Otherwise the table is resized if too many nodes in a bin.)
     * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
     * conflicts between resizing and treeification thresholds.
     */
    static final int MIN_TREEIFY_CAPACITY = 64;

    /**
     * Minimum number of rebinnings per transfer step. Ranges are
     * subdivided to allow multiple resizer threads. This value
     * serves as a lower bound to avoid resizers encountering
     * excessive memory contention. The value should be at least
     * DEFAULT_CAPACITY.
     */
    private static final int MIN_TRANSFER_STRIDE = 16;

    /*
     * Encodings for Node hash fields. See above for explanation.
     * All are negative so they can never collide with a spread()
     * user hash, whose sign bit is always cleared.
     */
    static final int MOVED     = -1; // hash for forwarding nodes
    static final int TREEBIN   = -2; // hash for roots of trees
    static final int RESERVED  = -3; // hash for transient reservations
    static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash

    /** Number of CPUS, to place bounds on some sizings */
    static final int NCPU = Runtime.getRuntime().availableProcessors();

    /** For serialization compatibility: the fields previous versions
     *  of this class serialized (Segment-based layout). */
    private static final ObjectStreamField[] serialPersistentFields = {
        new ObjectStreamField("segments", Segment[].class),
        new ObjectStreamField("segmentMask", Integer.TYPE),
        new ObjectStreamField("segmentShift", Integer.TYPE)
    };
598
599 /* ---------------- Nodes -------------- */
600
    /**
     * Key-value entry. This class is never exported out as a
     * user-mutable Map.Entry (i.e., one supporting setValue; see
     * MapEntry below), but can be used for read-only traversals used
     * in bulk tasks. Subclasses of Node with a negative hash field
     * are special, and contain null keys and values (but are never
     * exported). Otherwise, keys and vals are never null.
     */
    static class Node<K,V> implements Map.Entry<K,V> {
        final int hash;           // spread hash; negative only in special subclasses
        final K key;
        volatile V val;           // volatile: read without locks by get()
        volatile Node<K,V> next;  // volatile: bin lists are traversed concurrently

        Node(int hash, K key, V val, Node<K,V> next) {
            this.hash = hash;
            this.key = key;
            this.val = val;
            this.next = next;
        }

        public final K getKey() { return key; }
        public final V getValue() { return val; }
        public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
        public final String toString(){ return key + "=" + val; }
        // Read-only entry: mutation must go through the enclosing map.
        public final V setValue(V value) {
            throw new UnsupportedOperationException();
        }

        // Map.Entry equality: matching non-null key and value.
        public final boolean equals(Object o) {
            Object k, v, u; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    (k == key || k.equals(key)) &&
                    (v == (u = val) || v.equals(u)));
        }

        /**
         * Virtualized support for map.get(); overridden in subclasses.
         * Here: a linear scan of this bin's linked list.
         */
        Node<K,V> find(int h, Object k) {
            Node<K,V> e = this;
            if (k != null) {
                do {
                    K ek;
                    if (e.hash == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                } while ((e = e.next) != null);
            }
            return null;
        }
    }
655
656 /* ---------------- Static utilities -------------- */
657
658 /**
659 * Spreads (XORs) higher bits of hash to lower and also forces top
660 * bit to 0. Because the table uses power-of-two masking, sets of
661 * hashes that vary only in bits above the current mask will
662 * always collide. (Among known examples are sets of Float keys
663 * holding consecutive whole numbers in small tables.) So we
664 * apply a transform that spreads the impact of higher bits
665 * downward. There is a tradeoff between speed, utility, and
666 * quality of bit-spreading. Because many common sets of hashes
667 * are already reasonably distributed (so don't benefit from
668 * spreading), and because we use trees to handle large sets of
669 * collisions in bins, we just XOR some shifted bits in the
670 * cheapest possible way to reduce systematic lossage, as well as
671 * to incorporate impact of the highest bits that would otherwise
672 * never be used in index calculations because of table bounds.
673 */
674 static final int spread(int h) {
675 return (h ^ (h >>> 16)) & HASH_BITS;
676 }
677
678 /**
679 * Returns a power of two table size for the given desired capacity.
680 * See Hackers Delight, sec 3.2
681 */
682 private static final int tableSizeFor(int c) {
683 int n = c - 1;
684 n |= n >>> 1;
685 n |= n >>> 2;
686 n |= n >>> 4;
687 n |= n >>> 8;
688 n |= n >>> 16;
689 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
690 }
691
692 /**
693 * Returns x's Class if it is of the form "class C implements
694 * Comparable<C>", else null.
695 */
696 static Class<?> comparableClassFor(Object x) {
697 if (x instanceof Comparable) {
698 Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
699 if ((c = x.getClass()) == String.class) // bypass checks
700 return c;
701 if ((ts = c.getGenericInterfaces()) != null) {
702 for (int i = 0; i < ts.length; ++i) {
703 if (((t = ts[i]) instanceof ParameterizedType) &&
704 ((p = (ParameterizedType)t).getRawType() ==
705 Comparable.class) &&
706 (as = p.getActualTypeArguments()) != null &&
707 as.length == 1 && as[0] == c) // type arg is c
708 return c;
709 }
710 }
711 }
712 return null;
713 }
714
715 /**
716 * Returns k.compareTo(x) if x matches kc (k's screened comparable
717 * class), else 0.
718 */
719 @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
720 static int compareComparables(Class<?> kc, Object k, Object x) {
721 return (x == null || x.getClass() != kc ? 0 :
722 ((Comparable)k).compareTo(x));
723 }
724
725 /* ---------------- Table element access -------------- */
726
    /*
     * Volatile access methods are used for table elements as well as
     * elements of in-progress next table while resizing. All uses of
     * the tab arguments must be null checked by callers. All callers
     * also paranoically precheck that tab's length is not zero (or an
     * equivalent check), thus ensuring that any index argument taking
     * the form of a hash value anded with (length - 1) is a valid
     * index. Note that, to be correct wrt arbitrary concurrency
     * errors by users, these checks must operate on local variables,
     * which accounts for some odd-looking inline assignments below.
     * Note that calls to setTabAt always occur within locked regions,
     * and so in principle require only release ordering, not need
     * full volatile semantics, but are currently coded as volatile
     * writes to be conservative.
     */

    /** Volatile read of tab[i] via Unsafe (element offset = base + i*scale). */
    @SuppressWarnings("unchecked")
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
    }

    /** Atomic compare-and-set of tab[i] from c to v. */
    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }

    /** Volatile write of tab[i] (see ordering note in comment above). */
    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
    }
756
757 /* ---------------- Fields -------------- */
758
    /**
     * The array of bins. Lazily initialized upon first insertion.
     * Size is always a power of two. Accessed directly by iterators.
     */
    transient volatile Node<K,V>[] table;

    /**
     * The next table to use; non-null only while resizing.
     */
    private transient volatile Node<K,V>[] nextTable;

    /**
     * Base counter value, used mainly when there is no contention,
     * but also as a fallback during table initialization
     * races. Updated via CAS.
     */
    private transient volatile long baseCount;

    /**
     * Table initialization and resizing control. When negative, the
     * table is being initialized or resized: -1 for initialization,
     * else -(1 + the number of active resizing threads). Otherwise,
     * when table is null, holds the initial table size to use upon
     * creation, or 0 for default. After initialization, holds the
     * next element count value upon which to resize the table.
     */
    private transient volatile int sizeCtl;

    /**
     * The next table index (plus one) to split while resizing.
     */
    private transient volatile int transferIndex;

    /**
     * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
     */
    private transient volatile int cellsBusy;

    /**
     * Table of counter cells. When non-null, size is a power of 2.
     */
    private transient volatile CounterCell[] counterCells;

    // views (lazily constructed in keySet()/values()/entrySet();
    // creation races are benign since the views are stateless wrappers)
    private transient KeySetView<K,V> keySet;
    private transient ValuesView<K,V> values;
    private transient EntrySetView<K,V> entrySet;
807
808 /* ---------------- Public operations -------------- */
809
810 /**
811 * Creates a new, empty map with the default initial table size (16).
812 */
813 public ConcurrentHashMapV8() {
814 }
815
816 /**
817 * Creates a new, empty map with an initial table size
818 * accommodating the specified number of elements without the need
819 * to dynamically resize.
820 *
821 * @param initialCapacity The implementation performs internal
822 * sizing to accommodate this many elements.
823 * @throws IllegalArgumentException if the initial capacity of
824 * elements is negative
825 */
826 public ConcurrentHashMapV8(int initialCapacity) {
827 if (initialCapacity < 0)
828 throw new IllegalArgumentException();
829 int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
830 MAXIMUM_CAPACITY :
831 tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
832 this.sizeCtl = cap;
833 }
834
835 /**
836 * Creates a new map with the same mappings as the given map.
837 *
838 * @param m the map
839 */
840 public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
841 this.sizeCtl = DEFAULT_CAPACITY;
842 putAll(m);
843 }
844
845 /**
846 * Creates a new, empty map with an initial table size based on
847 * the given number of elements ({@code initialCapacity}) and
848 * initial table density ({@code loadFactor}).
849 *
850 * @param initialCapacity the initial capacity. The implementation
851 * performs internal sizing to accommodate this many elements,
852 * given the specified load factor.
853 * @param loadFactor the load factor (table density) for
854 * establishing the initial table size
855 * @throws IllegalArgumentException if the initial capacity of
856 * elements is negative or the load factor is nonpositive
857 *
858 * @since 1.6
859 */
860 public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
861 this(initialCapacity, loadFactor, 1);
862 }
863
864 /**
865 * Creates a new, empty map with an initial table size based on
866 * the given number of elements ({@code initialCapacity}), table
867 * density ({@code loadFactor}), and number of concurrently
868 * updating threads ({@code concurrencyLevel}).
869 *
870 * @param initialCapacity the initial capacity. The implementation
871 * performs internal sizing to accommodate this many elements,
872 * given the specified load factor.
873 * @param loadFactor the load factor (table density) for
874 * establishing the initial table size
875 * @param concurrencyLevel the estimated number of concurrently
876 * updating threads. The implementation may use this value as
877 * a sizing hint.
878 * @throws IllegalArgumentException if the initial capacity is
879 * negative or the load factor or concurrencyLevel are
880 * nonpositive
881 */
882 public ConcurrentHashMapV8(int initialCapacity,
883 float loadFactor, int concurrencyLevel) {
884 if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
885 throw new IllegalArgumentException();
886 if (initialCapacity < concurrencyLevel) // Use at least as many bins
887 initialCapacity = concurrencyLevel; // as estimated threads
888 long size = (long)(1.0 + (long)initialCapacity / loadFactor);
889 int cap = (size >= (long)MAXIMUM_CAPACITY) ?
890 MAXIMUM_CAPACITY : tableSizeFor((int)size);
891 this.sizeCtl = cap;
892 }
893
894 // Original (since JDK1.2) Map methods
895
896 /**
897 * {@inheritDoc}
898 */
899 public int size() {
900 long n = sumCount();
901 return ((n < 0L) ? 0 :
902 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
903 (int)n);
904 }
905
906 /**
907 * {@inheritDoc}
908 */
909 public boolean isEmpty() {
910 return sumCount() <= 0L; // ignore transient negative values
911 }
912
    /**
     * Returns the value to which the specified key is mapped,
     * or {@code null} if this map contains no mapping for the key.
     *
     * <p>More formally, if this map contains a mapping from a key
     * {@code k} to a value {@code v} such that {@code key.equals(k)},
     * then this method returns {@code v}; otherwise it returns
     * {@code null}. (There can be at most one such mapping.)
     *
     * <p>Implementation note: this is the lock-free read path; it
     * relies only on volatile reads (tabAt, Node.val/next).
     *
     * @throws NullPointerException if the specified key is null
     */
    public V get(Object key) {
        Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
        int h = spread(key.hashCode());
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (e = tabAt(tab, (n - 1) & h)) != null) {
            if ((eh = e.hash) == h) {
                // Common case: the bin's first node is the match.
                if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                    return e.val;
            }
            else if (eh < 0)
                // Special node (forwarding, tree root, or reservation):
                // delegate to its overridden find.
                return (p = e.find(h, key)) != null ? p.val : null;
            // Plain bin: scan the rest of the linked list.
            while ((e = e.next) != null) {
                if (e.hash == h &&
                    ((ek = e.key) == key || (ek != null && key.equals(ek))))
                    return e.val;
            }
        }
        return null;
    }
943
944 /**
945 * Tests if the specified object is a key in this table.
946 *
947 * @param key possible key
948 * @return {@code true} if and only if the specified object
949 * is a key in this table, as determined by the
950 * {@code equals} method; {@code false} otherwise
951 * @throws NullPointerException if the specified key is null
952 */
953 public boolean containsKey(Object key) {
954 return get(key) != null;
955 }
956
957 /**
958 * Returns {@code true} if this map maps one or more keys to the
959 * specified value. Note: This method may require a full traversal
960 * of the map, and is much slower than method {@code containsKey}.
961 *
962 * @param value value whose presence in this map is to be tested
963 * @return {@code true} if this map maps one or more keys to the
964 * specified value
965 * @throws NullPointerException if the specified value is null
966 */
967 public boolean containsValue(Object value) {
968 if (value == null)
969 throw new NullPointerException();
970 Node<K,V>[] t;
971 if ((t = table) != null) {
972 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
973 for (Node<K,V> p; (p = it.advance()) != null; ) {
974 V v;
975 if ((v = p.val) == value || (v != null && value.equals(v)))
976 return true;
977 }
978 }
979 return false;
980 }
981
    /**
     * Maps the specified key to the specified value in this table.
     * Neither the key nor the value can be null.
     *
     * <p>The value can be retrieved by calling the {@code get} method
     * with a key that is equal to the original key.
     *
     * @param key key with which the specified value is to be associated
     * @param value value to be associated with the specified key
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key or value is null
     */
    public V put(K key, V value) {
        return putVal(key, value, false);
    }

    /**
     * Implementation for put and putIfAbsent.
     *
     * Retry loop: each pass either (1) initializes the table,
     * (2) CASes a new node into an empty bin, (3) helps a resize in
     * progress when a forwarding node is seen, or (4) locks the bin's
     * first node and inserts/updates within it. binCount tracks the
     * bin's chain length to decide treeification.
     */
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        if (key == null || value == null) throw new NullPointerException();
        int hash = spread(key.hashCode());
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                if (casTabAt(tab, i, null,
                             new Node<K,V>(hash, key, value, null)))
                    break; // no lock when adding to empty bin
            }
            else if ((fh = f.hash) == MOVED)
                // Forwarding node: help complete the resize, then retry.
                tab = helpTransfer(tab, f);
            else {
                V oldVal = null;
                synchronized (f) {
                    // Recheck: bin head may have changed before we locked it.
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            // Plain linked-list bin: scan for key, else append.
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    pred.next = new Node<K,V>(hash, key,
                                                              value, null);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            // Tree bin: delegate to tree insertion.
                            Node<K,V> p;
                            binCount = 2;
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                                  value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (oldVal != null)
                        // Replaced an existing mapping: size unchanged.
                        return oldVal;
                    break;
                }
            }
        }
        // New mapping added: adjust count (addCount, defined elsewhere,
        // also checks for resize).
        addCount(1L, binCount);
        return null;
    }
1063
    /**
     * Copies all of the mappings from the specified map to this one.
     * These mappings replace any mappings that this map had for any of the
     * keys currently in the specified map.
     *
     * @param m mappings to be stored in this map
     */
    public void putAll(Map<? extends K, ? extends V> m) {
        // Presize once up front to avoid repeated incremental resizes.
        tryPresize(m.size());
        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
            putVal(e.getKey(), e.getValue(), false);
    }

    /**
     * Removes the key (and its corresponding value) from this map.
     * This method does nothing if the key is not in the map.
     *
     * @param key the key that needs to be removed
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key is null
     */
    public V remove(Object key) {
        return replaceNode(key, null, null);
    }

    /**
     * Implementation for the four public remove/replace methods:
     * Replaces node value with v, conditional upon match of cv if
     * non-null. If resulting value is null, delete.
     *
     * Same retry-loop shape as putVal: help resizes when a forwarding
     * node is seen, otherwise lock the bin head and operate under it.
     * "validated" is false when the locked bin head was stale, which
     * forces another pass.
     */
    final V replaceNode(Object key, V value, Object cv) {
        int hash = spread(key.hashCode());
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0 ||
                (f = tabAt(tab, i = (n - 1) & hash)) == null)
                break; // empty table or empty bin: nothing to remove/replace
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            else {
                V oldVal = null;
                boolean validated = false;
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            // Linked-list bin: find key, then replace or unlink.
                            validated = true;
                            for (Node<K,V> e = f, pred = null;;) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    V ev = e.val;
                                    // cv == null means unconditional match.
                                    if (cv == null || cv == ev ||
                                        (ev != null && cv.equals(ev))) {
                                        oldVal = ev;
                                        if (value != null)
                                            e.val = value;
                                        else if (pred != null)
                                            pred.next = e.next;
                                        else
                                            setTabAt(tab, i, e.next);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        else if (f instanceof TreeBin) {
                            validated = true;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(hash, key, null)) != null) {
                                V pv = p.val;
                                if (cv == null || cv == pv ||
                                    (pv != null && cv.equals(pv))) {
                                    oldVal = pv;
                                    if (value != null)
                                        p.val = value;
                                    else if (t.removeTreeNode(p))
                                        // Tree became too small: revert to list.
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                    }
                }
                if (validated) {
                    if (oldVal != null) {
                        if (value == null)
                            // Removal: decrement count (no resize check needed).
                            addCount(-1L, -1);
                        return oldVal;
                    }
                    break;
                }
            }
        }
        return null;
    }
1165
    /**
     * Removes all of the mappings from this map.
     *
     * Proceeds bin by bin, locking each bin head, counting its nodes,
     * and nulling the bin. If a resize is encountered (forwarding
     * node), helps it and restarts from bin 0 on the new table.
     */
    public void clear() {
        long delta = 0L; // negative number of deletions
        int i = 0;
        Node<K,V>[] tab = table;
        while (tab != null && i < tab.length) {
            int fh;
            Node<K,V> f = tabAt(tab, i);
            if (f == null)
                ++i;
            else if ((fh = f.hash) == MOVED) {
                tab = helpTransfer(tab, f);
                i = 0; // restart
            }
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        // Count every node in the bin (tree bins are
                        // walked via their linked "first" list).
                        Node<K,V> p = (fh >= 0 ? f :
                                       (f instanceof TreeBin) ?
                                       ((TreeBin<K,V>)f).first : null);
                        while (p != null) {
                            --delta;
                            p = p.next;
                        }
                        setTabAt(tab, i++, null);
                    }
                }
            }
        }
        if (delta != 0L)
            addCount(delta, -1);
    }
1200
1201 /**
1202 * Returns a {@link Set} view of the keys contained in this map.
1203 * The set is backed by the map, so changes to the map are
1204 * reflected in the set, and vice-versa. The set supports element
1205 * removal, which removes the corresponding mapping from this map,
1206 * via the {@code Iterator.remove}, {@code Set.remove},
1207 * {@code removeAll}, {@code retainAll}, and {@code clear}
1208 * operations. It does not support the {@code add} or
1209 * {@code addAll} operations.
1210 *
1211 * <p>The view's {@code iterator} is a "weakly consistent" iterator
1212 * that will never throw {@link ConcurrentModificationException},
1213 * and guarantees to traverse elements as they existed upon
1214 * construction of the iterator, and may (but is not guaranteed to)
1215 * reflect any modifications subsequent to construction.
1216 *
1217 * @return the set view
1218 */
1219 public KeySetView<K,V> keySet() {
1220 KeySetView<K,V> ks;
1221 return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
1222 }
1223
1224 /**
1225 * Returns a {@link Collection} view of the values contained in this map.
1226 * The collection is backed by the map, so changes to the map are
1227 * reflected in the collection, and vice-versa. The collection
1228 * supports element removal, which removes the corresponding
1229 * mapping from this map, via the {@code Iterator.remove},
1230 * {@code Collection.remove}, {@code removeAll},
1231 * {@code retainAll}, and {@code clear} operations. It does not
1232 * support the {@code add} or {@code addAll} operations.
1233 *
1234 * <p>The view's {@code iterator} is a "weakly consistent" iterator
1235 * that will never throw {@link ConcurrentModificationException},
1236 * and guarantees to traverse elements as they existed upon
1237 * construction of the iterator, and may (but is not guaranteed to)
1238 * reflect any modifications subsequent to construction.
1239 *
1240 * @return the collection view
1241 */
1242 public Collection<V> values() {
1243 ValuesView<K,V> vs;
1244 return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
1245 }
1246
1247 /**
1248 * Returns a {@link Set} view of the mappings contained in this map.
1249 * The set is backed by the map, so changes to the map are
1250 * reflected in the set, and vice-versa. The set supports element
1251 * removal, which removes the corresponding mapping from the map,
1252 * via the {@code Iterator.remove}, {@code Set.remove},
1253 * {@code removeAll}, {@code retainAll}, and {@code clear}
1254 * operations.
1255 *
1256 * <p>The view's {@code iterator} is a "weakly consistent" iterator
1257 * that will never throw {@link ConcurrentModificationException},
1258 * and guarantees to traverse elements as they existed upon
1259 * construction of the iterator, and may (but is not guaranteed to)
1260 * reflect any modifications subsequent to construction.
1261 *
1262 * @return the set view
1263 */
1264 public Set<Map.Entry<K,V>> entrySet() {
1265 EntrySetView<K,V> es;
1266 return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
1267 }
1268
1269 /**
1270 * Returns the hash code value for this {@link Map}, i.e.,
1271 * the sum of, for each key-value pair in the map,
1272 * {@code key.hashCode() ^ value.hashCode()}.
1273 *
1274 * @return the hash code value for this map
1275 */
1276 public int hashCode() {
1277 int h = 0;
1278 Node<K,V>[] t;
1279 if ((t = table) != null) {
1280 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1281 for (Node<K,V> p; (p = it.advance()) != null; )
1282 h += p.key.hashCode() ^ p.val.hashCode();
1283 }
1284 return h;
1285 }
1286
1287 /**
1288 * Returns a string representation of this map. The string
1289 * representation consists of a list of key-value mappings (in no
1290 * particular order) enclosed in braces ("{@code {}}"). Adjacent
1291 * mappings are separated by the characters {@code ", "} (comma
1292 * and space). Each key-value mapping is rendered as the key
1293 * followed by an equals sign ("{@code =}") followed by the
1294 * associated value.
1295 *
1296 * @return a string representation of this map
1297 */
1298 public String toString() {
1299 Node<K,V>[] t;
1300 int f = (t = table) == null ? 0 : t.length;
1301 Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1302 StringBuilder sb = new StringBuilder();
1303 sb.append('{');
1304 Node<K,V> p;
1305 if ((p = it.advance()) != null) {
1306 for (;;) {
1307 K k = p.key;
1308 V v = p.val;
1309 sb.append(k == this ? "(this Map)" : k);
1310 sb.append('=');
1311 sb.append(v == this ? "(this Map)" : v);
1312 if ((p = it.advance()) == null)
1313 break;
1314 sb.append(',').append(' ');
1315 }
1316 }
1317 return sb.append('}').toString();
1318 }
1319
1320 /**
1321 * Compares the specified object with this map for equality.
1322 * Returns {@code true} if the given object is a map with the same
1323 * mappings as this map. This operation may return misleading
1324 * results if either map is concurrently modified during execution
1325 * of this method.
1326 *
1327 * @param o object to be compared for equality with this map
1328 * @return {@code true} if the specified object is equal to this map
1329 */
1330 public boolean equals(Object o) {
1331 if (o != this) {
1332 if (!(o instanceof Map))
1333 return false;
1334 Map<?,?> m = (Map<?,?>) o;
1335 Node<K,V>[] t;
1336 int f = (t = table) == null ? 0 : t.length;
1337 Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1338 for (Node<K,V> p; (p = it.advance()) != null; ) {
1339 V val = p.val;
1340 Object v = m.get(p.key);
1341 if (v == null || (v != val && !v.equals(val)))
1342 return false;
1343 }
1344 for (Map.Entry<?,?> e : m.entrySet()) {
1345 Object mk, mv, v;
1346 if ((mk = e.getKey()) == null ||
1347 (mv = e.getValue()) == null ||
1348 (v = get(mk)) == null ||
1349 (mv != v && !mv.equals(v)))
1350 return false;
1351 }
1352 }
1353 return true;
1354 }
1355
    /**
     * Stripped-down version of helper class used in previous version,
     * declared for the sake of serialization compatibility.
     */
    static class Segment<K,V> extends ReentrantLock implements Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        final float loadFactor;
        Segment(float lf) { this.loadFactor = lf; }
    }

    /**
     * Saves the state of the {@code ConcurrentHashMapV8} instance to a
     * stream (i.e., serializes it).
     * @param s the stream
     * @throws java.io.IOException if an I/O error occurs
     * @serialData
     * the key (Object) and value (Object)
     * for each key-value mapping, followed by a null pair.
     * The key-value mappings are emitted in no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        // For serialization compatibility
        // Emulate segment calculation from previous version of this class
        int sshift = 0;
        int ssize = 1;
        while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
            ++sshift;
            ssize <<= 1;
        }
        int segmentShift = 32 - sshift;
        int segmentMask = ssize - 1;
        // Write dummy segments matching serialPersistentFields so old
        // readers see the format they expect.
        @SuppressWarnings("unchecked") Segment<K,V>[] segments = (Segment<K,V>[])
            new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
        for (int i = 0; i < segments.length; ++i)
            segments[i] = new Segment<K,V>(LOAD_FACTOR);
        s.putFields().put("segments", segments);
        s.putFields().put("segmentShift", segmentShift);
        s.putFields().put("segmentMask", segmentMask);
        s.writeFields();

        // Emit the actual mappings as alternating key/value objects.
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                s.writeObject(p.key);
                s.writeObject(p.val);
            }
        }
        // Null pair terminates the mapping stream.
        s.writeObject(null);
        s.writeObject(null);
        segments = null; // throw away
    }
1409
    /**
     * Reconstitutes the instance from a stream (that is, deserializes it).
     * @param s the stream
     * @throws ClassNotFoundException if the class of a serialized object
     *         could not be found
     * @throws java.io.IOException if an I/O error occurs
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        /*
         * To improve performance in typical cases, we create nodes
         * while reading, then place in table once size is known.
         * However, we must also validate uniqueness and deal with
         * overpopulated bins while doing so, which requires
         * specialized versions of putVal mechanics.
         */
        sizeCtl = -1; // force exclusion for table construction
        s.defaultReadObject();
        long size = 0L;
        // Read alternating key/value pairs into a stack of nodes until
        // the terminating null pair (see writeObject's @serialData).
        Node<K,V> p = null;
        for (;;) {
            @SuppressWarnings("unchecked") K k = (K) s.readObject();
            @SuppressWarnings("unchecked") V v = (V) s.readObject();
            if (k != null && v != null) {
                p = new Node<K,V>(spread(k.hashCode()), k, v, p);
                ++size;
            }
            else
                break;
        }
        if (size == 0L)
            sizeCtl = 0;
        else {
            // Size the table as in the int constructor: pad by ~1/2.
            int n;
            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
                n = MAXIMUM_CAPACITY;
            else {
                int sz = (int)size;
                n = tableSizeFor(sz + (sz >>> 1) + 1);
            }
            @SuppressWarnings("unchecked")
            Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
            int mask = n - 1;
            long added = 0L; // mappings actually inserted (duplicates dropped)
            while (p != null) {
                boolean insertAtFront;
                Node<K,V> next = p.next, first;
                int h = p.hash, j = h & mask;
                if ((first = tabAt(tab, j)) == null)
                    insertAtFront = true;
                else {
                    K k = p.key;
                    if (first.hash < 0) {
                        // Bin already treeified: insert into the tree.
                        TreeBin<K,V> t = (TreeBin<K,V>)first;
                        if (t.putTreeVal(h, k, p.val) == null)
                            ++added;
                        insertAtFront = false;
                    }
                    else {
                        // Scan the bin for a duplicate key; count length.
                        int binCount = 0;
                        insertAtFront = true;
                        Node<K,V> q; K qk;
                        for (q = first; q != null; q = q.next) {
                            if (q.hash == h &&
                                ((qk = q.key) == k ||
                                 (qk != null && k.equals(qk)))) {
                                insertAtFront = false; // duplicate: drop p
                                break;
                            }
                            ++binCount;
                        }
                        if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
                            // Bin is overpopulated: treeify it with p included.
                            insertAtFront = false;
                            ++added;
                            p.next = first;
                            TreeNode<K,V> hd = null, tl = null;
                            for (q = p; q != null; q = q.next) {
                                TreeNode<K,V> t = new TreeNode<K,V>
                                    (q.hash, q.key, q.val, null, null);
                                if ((t.prev = tl) == null)
                                    hd = t;
                                else
                                    tl.next = t;
                                tl = t;
                            }
                            setTabAt(tab, j, new TreeBin<K,V>(hd));
                        }
                    }
                }
                if (insertAtFront) {
                    ++added;
                    p.next = first;
                    setTabAt(tab, j, p);
                }
                p = next;
            }
            table = tab;
            sizeCtl = n - (n >>> 2); // next resize threshold (0.75 * n)
            baseCount = added;
        }
    }
1511
1512 // ConcurrentMap methods
1513
1514 /**
1515 * {@inheritDoc}
1516 *
1517 * @return the previous value associated with the specified key,
1518 * or {@code null} if there was no mapping for the key
1519 * @throws NullPointerException if the specified key or value is null
1520 */
1521 public V putIfAbsent(K key, V value) {
1522 return putVal(key, value, true);
1523 }
1524
1525 /**
1526 * {@inheritDoc}
1527 *
1528 * @throws NullPointerException if the specified key is null
1529 */
1530 public boolean remove(Object key, Object value) {
1531 if (key == null)
1532 throw new NullPointerException();
1533 return value != null && replaceNode(key, null, value) != null;
1534 }
1535
1536 /**
1537 * {@inheritDoc}
1538 *
1539 * @throws NullPointerException if any of the arguments are null
1540 */
1541 public boolean replace(K key, V oldValue, V newValue) {
1542 if (key == null || oldValue == null || newValue == null)
1543 throw new NullPointerException();
1544 return replaceNode(key, newValue, oldValue) != null;
1545 }
1546
1547 /**
1548 * {@inheritDoc}
1549 *
1550 * @return the previous value associated with the specified key,
1551 * or {@code null} if there was no mapping for the key
1552 * @throws NullPointerException if the specified key or value is null
1553 */
1554 public V replace(K key, V value) {
1555 if (key == null || value == null)
1556 throw new NullPointerException();
1557 return replaceNode(key, value, null);
1558 }
1559
1560 // Overrides of JDK8+ Map extension method defaults
1561
1562 /**
1563 * Returns the value to which the specified key is mapped, or the
1564 * given default value if this map contains no mapping for the
1565 * key.
1566 *
1567 * @param key the key whose associated value is to be returned
1568 * @param defaultValue the value to return if this map contains
1569 * no mapping for the given key
1570 * @return the mapping for the key, if present; else the default value
1571 * @throws NullPointerException if the specified key is null
1572 */
1573 public V getOrDefault(Object key, V defaultValue) {
1574 V v;
1575 return (v = get(key)) == null ? defaultValue : v;
1576 }
1577
1578 public void forEach(BiAction<? super K, ? super V> action) {
1579 if (action == null) throw new NullPointerException();
1580 Node<K,V>[] t;
1581 if ((t = table) != null) {
1582 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1583 for (Node<K,V> p; (p = it.advance()) != null; ) {
1584 action.apply(p.key, p.val);
1585 }
1586 }
1587 }
1588
    /**
     * Replaces each entry's value with the result of applying the
     * given function to that entry, retrying each key until either a
     * CAS-style conditional replace succeeds or the key disappears.
     */
    public void replaceAll(BiFun<? super K, ? super V, ? extends V> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V oldValue = p.val;
                for (K key = p.key;;) {
                    V newValue = function.apply(key, oldValue);
                    if (newValue == null)
                        throw new NullPointerException();
                    // Conditional replace; on failure re-read the current
                    // value and retry, stopping if the mapping vanished.
                    if (replaceNode(key, newValue, oldValue) != null ||
                        (oldValue = get(key)) == null)
                        break;
                }
            }
        }
    }
1607
    /**
     * If the specified key is not already associated with a value,
     * attempts to compute its value using the given mapping function
     * and enters it into this map unless {@code null}. The entire
     * method invocation is performed atomically, so the function is
     * applied at most once per key. Some attempted update operations
     * on this map by other threads may be blocked while computation
     * is in progress, so the computation should be short and simple,
     * and must not attempt to update any other mappings of this map.
     *
     * @param key key with which the specified value is to be associated
     * @param mappingFunction the function to compute a value
     * @return the current (existing or computed) value associated with
     *         the specified key, or null if the computed value is null
     * @throws NullPointerException if the specified key or mappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the mappingFunction does so,
     *         in which case the mapping is left unestablished
     */
    public V computeIfAbsent(K key, Fun<? super K, ? extends V> mappingFunction) {
        if (key == null || mappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: install a ReservationNode place-holder while the
                // function runs, so concurrent updaters of this bin block on
                // its monitor instead of racing the computation.
                Node<K,V> r = new ReservationNode<K,V>();
                synchronized (r) {
                    if (casTabAt(tab, i, null, r)) {
                        binCount = 1;
                        Node<K,V> node = null;
                        try {
                            if ((val = mappingFunction.apply(key)) != null)
                                node = new Node<K,V>(h, key, val, null);
                        } finally {
                            // Always overwrite the reservation: with the new
                            // node, or back to null if the function returned
                            // null or threw.
                            setTabAt(tab, i, node);
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);  // help finish resize, then retry
            else {
                boolean added = false;
                synchronized (f) {
                    if (tabAt(tab, i) == f) {  // recheck head under lock
                        if (fh >= 0) {         // ordinary linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek; V ev;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = e.val;  // already present; no call
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    // Not found: compute and append at tail.
                                    if ((val = mappingFunction.apply(key)) != null) {
                                        added = true;
                                        pred.next = new Node<K,V>(h, key, val, null);
                                    }
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(h, key, null)) != null)
                                val = p.val;  // already present
                            else if ((val = mappingFunction.apply(key)) != null) {
                                added = true;
                                t.putTreeVal(h, key, val);
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (!added)
                        return val;  // no insertion -> no count adjustment
                    break;
                }
            }
        }
        if (val != null)
            addCount(1L, binCount);
        return val;
    }
1710
    /**
     * If the value for the specified key is present, attempts to
     * compute a new mapping given the key and its current mapped
     * value. The entire method invocation is performed atomically.
     * Some attempted update operations on this map by other threads
     * may be blocked while computation is in progress, so the
     * computation should be short and simple, and must not attempt to
     * update any other mappings of this map.
     *
     * @param key key with which a value may be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V computeIfPresent(K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;    // net change in mapping count (-1 on removal)
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
                break;    // empty bin: key absent, nothing to do
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);  // help finish resize, then retry
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {  // recheck head under lock
                        if (fh >= 0) {         // ordinary linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(key, e.val);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        // null result removes the mapping.
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;  // key absent in this bin
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(h, key, null)) != null) {
                                val = remappingFunction.apply(key, p.val);
                                if (val != null)
                                    p.val = val;
                                else {
                                    delta = -1;
                                    // Shrink back to a plain list if the tree
                                    // got too small after removal.
                                    if (t.removeTreeNode(p))
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
1800
    /**
     * Attempts to compute a mapping for the specified key and its
     * current mapped value (or {@code null} if there is no current
     * mapping). The entire method invocation is performed atomically.
     * Some attempted update operations on this map by other threads
     * may be blocked while computation is in progress, so the
     * computation should be short and simple, and must not attempt to
     * update any other mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V compute(K key,
                     BiFun<? super K, ? super V, ? extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;    // net change in mapping count: +1 add, -1 remove
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: reserve it while the function runs so other
                // updaters block on the reservation's monitor.
                Node<K,V> r = new ReservationNode<K,V>();
                synchronized (r) {
                    if (casTabAt(tab, i, null, r)) {
                        binCount = 1;
                        Node<K,V> node = null;
                        try {
                            if ((val = remappingFunction.apply(key, null)) != null) {
                                delta = 1;
                                node = new Node<K,V>(h, key, val, null);
                            }
                        } finally {
                            // Replace the reservation with the computed node,
                            // or null if none was produced.
                            setTabAt(tab, i, node);
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);  // help finish resize, then retry
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {  // recheck head under lock
                        if (fh >= 0) {         // ordinary linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(key, e.val);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        // null result removes the mapping.
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    // Absent: apply with null and append if
                                    // a value was produced.
                                    val = remappingFunction.apply(key, null);
                                    if (val != null) {
                                        delta = 1;
                                        pred.next =
                                            new Node<K,V>(h, key, val, null);
                                    }
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 1;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null)
                                p = r.findTreeNode(h, key, null);
                            else
                                p = null;
                            V pv = (p == null) ? null : p.val;
                            val = remappingFunction.apply(key, pv);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                // Shrink back to a plain list if the tree got
                                // too small after removal.
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
1926
    /**
     * If the specified key is not already associated with a
     * (non-null) value, associates it with the given value.
     * Otherwise, replaces the value with the results of the given
     * remapping function, or removes if {@code null}. The entire
     * method invocation is performed atomically. Some attempted
     * update operations on this map by other threads may be blocked
     * while computation is in progress, so the computation should be
     * short and simple, and must not attempt to update any other
     * mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param value the value to use if absent
     * @param remappingFunction the function to recompute a value if present
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or the
     *         remappingFunction is null
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V merge(K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
        if (key == null || value == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;    // net change in mapping count: +1 add, -1 remove
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: the function is never invoked; just try to CAS
                // in a new node with the given value.
                if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) {
                    delta = 1;
                    val = value;
                    break;
                }
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);  // help finish resize, then retry
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {  // recheck head under lock
                        if (fh >= 0) {         // ordinary linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(e.val, value);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        // null result removes the mapping.
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    // Absent: install the given value as-is.
                                    delta = 1;
                                    val = value;
                                    pred.next =
                                        new Node<K,V>(h, key, val, null);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r = t.root;
                            TreeNode<K,V> p = (r == null) ? null :
                                r.findTreeNode(h, key, null);
                            val = (p == null) ? value :
                                remappingFunction.apply(p.val, value);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                // Shrink back to a plain list if the tree got
                                // too small after removal.
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
2035
2036 // Hashtable legacy methods
2037
    /**
     * Legacy method testing if some key maps into the specified value
     * in this table. This method is identical in functionality to
     * {@link #containsValue(Object)}, and exists solely to ensure
     * full compatibility with class {@link java.util.Hashtable},
     * which supported this method prior to introduction of the
     * Java Collections framework.
     *
     * @param value a value to search for
     * @return {@code true} if and only if some key maps to the
     *         {@code value} argument in this table as
     *         determined by the {@code equals} method;
     *         {@code false} otherwise
     * @throws NullPointerException if the specified value is null
     */
    @Deprecated public boolean contains(Object value) {
        // Pure delegation; kept only for Hashtable source compatibility.
        return containsValue(value);
    }
2056
2057 /**
2058 * Returns an enumeration of the keys in this table.
2059 *
2060 * @return an enumeration of the keys in this table
2061 * @see #keySet()
2062 */
2063 public Enumeration<K> keys() {
2064 Node<K,V>[] t;
2065 int f = (t = table) == null ? 0 : t.length;
2066 return new KeyIterator<K,V>(t, f, 0, f, this);
2067 }
2068
2069 /**
2070 * Returns an enumeration of the values in this table.
2071 *
2072 * @return an enumeration of the values in this table
2073 * @see #values()
2074 */
2075 public Enumeration<V> elements() {
2076 Node<K,V>[] t;
2077 int f = (t = table) == null ? 0 : t.length;
2078 return new ValueIterator<K,V>(t, f, 0, f, this);
2079 }
2080
2081 // ConcurrentHashMapV8-only methods
2082
2083 /**
2084 * Returns the number of mappings. This method should be used
2085 * instead of {@link #size} because a ConcurrentHashMapV8 may
2086 * contain more mappings than can be represented as an int. The
2087 * value returned is an estimate; the actual count may differ if
2088 * there are concurrent insertions or removals.
2089 *
2090 * @return the number of mappings
2091 * @since 1.8
2092 */
2093 public long mappingCount() {
2094 long n = sumCount();
2095 return (n < 0L) ? 0L : n; // ignore transient negative values
2096 }
2097
2098 /**
2099 * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2100 * from the given type to {@code Boolean.TRUE}.
2101 *
2102 * @return the new set
2103 * @since 1.8
2104 */
2105 public static <K> KeySetView<K,Boolean> newKeySet() {
2106 return new KeySetView<K,Boolean>
2107 (new ConcurrentHashMapV8<K,Boolean>(), Boolean.TRUE);
2108 }
2109
2110 /**
2111 * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2112 * from the given type to {@code Boolean.TRUE}.
2113 *
2114 * @param initialCapacity The implementation performs internal
2115 * sizing to accommodate this many elements.
2116 * @return the new set
2117 * @throws IllegalArgumentException if the initial capacity of
2118 * elements is negative
2119 * @since 1.8
2120 */
2121 public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2122 return new KeySetView<K,Boolean>
2123 (new ConcurrentHashMapV8<K,Boolean>(initialCapacity), Boolean.TRUE);
2124 }
2125
2126 /**
2127 * Returns a {@link Set} view of the keys in this map, using the
2128 * given common mapped value for any additions (i.e., {@link
2129 * Collection#add} and {@link Collection#addAll(Collection)}).
2130 * This is of course only appropriate if it is acceptable to use
2131 * the same value for all additions from this view.
2132 *
2133 * @param mappedValue the mapped value to use for any additions
2134 * @return the set view
2135 * @throws NullPointerException if the mappedValue is null
2136 */
2137 public KeySetView<K,V> keySet(V mappedValue) {
2138 if (mappedValue == null)
2139 throw new NullPointerException();
2140 return new KeySetView<K,V>(this, mappedValue);
2141 }
2142
2143 /* ---------------- Special Nodes -------------- */
2144
    /**
     * A node inserted at head of bins during transfer operations.
     */
    static final class ForwardingNode<K,V> extends Node<K,V> {
        final Node<K,V>[] nextTable;   // table being transferred into
        ForwardingNode(Node<K,V>[] tab) {
            super(MOVED, null, null, null);  // MOVED hash marks a forwarder
            this.nextTable = tab;
        }

        // Relays a lookup for (h, k) into nextTable. If it meets another
        // ForwardingNode there, it restarts the outer loop on that node's
        // table instead of recursing.
        Node<K,V> find(int h, Object k) {
            // loop to avoid arbitrarily deep recursion on forwarding nodes
            outer: for (Node<K,V>[] tab = nextTable;;) {
                Node<K,V> e; int n;
                if (k == null || tab == null || (n = tab.length) == 0 ||
                    (e = tabAt(tab, (n - 1) & h)) == null)
                    return null;
                for (;;) {
                    int eh; K ek;
                    if ((eh = e.hash) == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                    if (eh < 0) {  // special node: forwarder, tree bin, ...
                        if (e instanceof ForwardingNode) {
                            tab = ((ForwardingNode<K,V>)e).nextTable;
                            continue outer;
                        }
                        else
                            return e.find(h, k);  // delegate to special node
                    }
                    if ((e = e.next) == null)
                        return null;
                }
            }
        }
    }
2181
    /**
     * A place-holder node used in computeIfAbsent and compute
     */
    static final class ReservationNode<K,V> extends Node<K,V> {
        ReservationNode() {
            // RESERVED hash marks a bin whose value is being computed;
            // the node carries no key, value, or successor.
            super(RESERVED, null, null, null);
        }

        // A reserved bin is logically empty: lookups never match until the
        // computing thread installs a real node (or restores null).
        Node<K,V> find(int h, Object k) {
            return null;
        }
    }
2194
2195 /* ---------------- Table Initialization and Resizing -------------- */
2196
    /**
     * Initializes table, using the size recorded in sizeCtl.
     */
    private final Node<K,V>[] initTable() {
        Node<K,V>[] tab; int sc;
        while ((tab = table) == null || tab.length == 0) {
            if ((sc = sizeCtl) < 0)
                Thread.yield(); // lost initialization race; just spin
            else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                // Won the CAS: this thread creates the table. sizeCtl == -1
                // signals initialization in progress to other threads.
                try {
                    if ((tab = table) == null || tab.length == 0) {
                        // Positive sizeCtl holds the requested capacity.
                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                        @SuppressWarnings("unchecked")
                        Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                        table = tab = nt;
                        sc = n - (n >>> 2);  // next resize threshold: 0.75 * n
                    }
                } finally {
                    sizeCtl = sc;  // publish threshold even if alloc threw
                }
                break;
            }
        }
        return tab;
    }
2222
    /**
     * Adds to count, and if table is too small and not already
     * resizing, initiates transfer. If already resizing, helps
     * perform transfer if work is available. Rechecks occupancy
     * after a transfer to see if another resize is already needed
     * because resizings are lagging additions.
     *
     * @param x the count to add
     * @param check if <0, don't check resize, if <= 1 only check if uncontended
     */
    private final void addCount(long x, int check) {
        CounterCell[] as; long b, s;
        if ((as = counterCells) != null ||
            !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
            // Either striped cells already exist or the baseCount CAS
            // failed: try this thread's cell; on any miss or contention,
            // fall back to the slow path in fullAddCount.
            CounterHashCode hc; CounterCell a; long v; int m;
            boolean uncontended = true;
            if ((hc = threadCounterHashCode.get()) == null ||
                as == null || (m = as.length - 1) < 0 ||
                (a = as[m & hc.code]) == null ||
                !(uncontended =
                  U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
                fullAddCount(x, hc, uncontended);
                return;
            }
            if (check <= 1)
                return;      // caller asked to skip resize check if contended
            s = sumCount();
        }
        if (check >= 0) {
            // Start or help a resize while the estimated count s has reached
            // the sizeCtl threshold.
            Node<K,V>[] tab, nt; int sc;
            while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
                   tab.length < MAXIMUM_CAPACITY) {
                if (sc < 0) {  // a resize (or init) is already in progress
                    if (sc == -1 || transferIndex <= 0 ||
                        (nt = nextTable) == null)
                        break; // initializing, or no transfer work remains
                    if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
                        transfer(tab, nt);   // enlist as an extra transferer
                }
                else if (U.compareAndSwapInt(this, SIZECTL, sc, -2))
                    transfer(tab, null);     // become the initiating thread
                s = sumCount();
            }
        }
    }
2268
    /**
     * Helps transfer if a resize is in progress.
     */
    final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
        Node<K,V>[] nextTab; int sc;
        if ((f instanceof ForwardingNode) &&
            (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
            // Join the transfer only while it is still the current one
            // (nextTab == nextTable) and unclaimed work remains.
            while (transferIndex > 0 && nextTab == nextTable &&
                   (sc = sizeCtl) < -1) {
                // Decrementing sizeCtl registers this thread as a helper.
                if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1)) {
                    transfer(tab, nextTab);
                    break;
                }
            }
            return nextTab;   // caller retries against the new table
        }
        return table;
    }
2287
    /**
     * Tries to presize table to accommodate the given number of elements.
     *
     * @param size number of elements (doesn't need to be perfectly accurate)
     */
    private final void tryPresize(int size) {
        // Target capacity: 1.5 * size + 1, rounded up to a power of two,
        // capped at MAXIMUM_CAPACITY.
        int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
            tableSizeFor(size + (size >>> 1) + 1);
        int sc;
        while ((sc = sizeCtl) >= 0) {  // stop if init/resize is in progress
            Node<K,V>[] tab = table; int n;
            if (tab == null || (n = tab.length) == 0) {
                // Table not yet created: allocate it here, like initTable.
                n = (sc > c) ? sc : c;
                if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                    try {
                        if (table == tab) {
                            @SuppressWarnings("unchecked")
                            Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                            table = nt;
                            sc = n - (n >>> 2);  // 0.75 * n threshold
                        }
                    } finally {
                        sizeCtl = sc;
                    }
                }
            }
            else if (c <= sc || n >= MAXIMUM_CAPACITY)
                break;   // already large enough, or cannot grow further
            else if (tab == table &&
                     U.compareAndSwapInt(this, SIZECTL, sc, -2))
                transfer(tab, null);  // initiate a resize
        }
    }
2321
    /**
     * Moves and/or copies the nodes in each bin to new table. See
     * above for explanation.
     */
    private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
        int n = tab.length, stride;
        if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
            stride = MIN_TRANSFER_STRIDE; // subdivide range
        if (nextTab == null) {            // initiating
            try {
                @SuppressWarnings("unchecked")
                Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
                nextTab = nt;
            } catch (Throwable ex) {      // try to cope with OOME
                sizeCtl = Integer.MAX_VALUE;  // disable further resizing
                return;
            }
            nextTable = nextTab;
            transferIndex = n;            // bins [0, n) remain to be claimed
        }
        int nextn = nextTab.length;
        ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
        boolean advance = true;
        boolean finishing = false; // to ensure sweep before committing nextTab
        for (int i = 0, bound = 0;;) {
            Node<K,V> f; int fh;
            // Claim the next stride of bins by CASing transferIndex down;
            // i then walks downward through the claimed range [bound, i].
            while (advance) {
                int nextIndex, nextBound;
                if (--i >= bound || finishing)
                    advance = false;
                else if ((nextIndex = transferIndex) <= 0) {
                    i = -1;               // no bins left to claim
                    advance = false;
                }
                else if (U.compareAndSwapInt
                         (this, TRANSFERINDEX, nextIndex,
                          nextBound = (nextIndex > stride ?
                                       nextIndex - stride : 0))) {
                    bound = nextBound;
                    i = nextIndex - 1;
                    advance = false;
                }
            }
            if (i < 0 || i >= n || i + n >= nextn) {
                int sc;
                if (finishing) {
                    // Last worker commits: publish nextTab and set the next
                    // resize threshold to 0.75 * (2n).
                    nextTable = null;
                    table = nextTab;
                    sizeCtl = (n << 1) - (n >>> 1);
                    return;
                }
                // Deregister this worker by incrementing sizeCtl back.
                if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) {
                    if (sc != -1)
                        return;           // other workers remain; retire
                    finishing = advance = true;
                    i = n; // recheck before commit
                }
            }
            else if ((f = tabAt(tab, i)) == null)
                advance = casTabAt(tab, i, null, fwd);  // mark empty bin done
            else if ((fh = f.hash) == MOVED)
                advance = true; // already processed
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        // Split this bin into a "low" list (stays at index i)
                        // and a "high" list (moves to i + n), keyed by the
                        // bit (hash & n).
                        Node<K,V> ln, hn;
                        if (fh >= 0) {
                            int runBit = fh & n;
                            Node<K,V> lastRun = f;
                            // The trailing run from lastRun onward all maps
                            // to the same half, so it is reused uncopied.
                            for (Node<K,V> p = f.next; p != null; p = p.next) {
                                int b = p.hash & n;
                                if (b != runBit) {
                                    runBit = b;
                                    lastRun = p;
                                }
                            }
                            if (runBit == 0) {
                                ln = lastRun;
                                hn = null;
                            }
                            else {
                                hn = lastRun;
                                ln = null;
                            }
                            for (Node<K,V> p = f; p != lastRun; p = p.next) {
                                int ph = p.hash; K pk = p.key; V pv = p.val;
                                if ((ph & n) == 0)
                                    ln = new Node<K,V>(ph, pk, pv, ln);
                                else
                                    hn = new Node<K,V>(ph, pk, pv, hn);
                            }
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);  // old bin now forwards
                            advance = true;
                        }
                        else if (f instanceof TreeBin) {
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> lo = null, loTail = null;
                            TreeNode<K,V> hi = null, hiTail = null;
                            int lc = 0, hc = 0;
                            for (Node<K,V> e = t.first; e != null; e = e.next) {
                                int h = e.hash;
                                TreeNode<K,V> p = new TreeNode<K,V>
                                    (h, e.key, e.val, null, null);
                                if ((h & n) == 0) {
                                    if ((p.prev = loTail) == null)
                                        lo = p;
                                    else
                                        loTail.next = p;
                                    loTail = p;
                                    ++lc;
                                }
                                else {
                                    if ((p.prev = hiTail) == null)
                                        hi = p;
                                    else
                                        hiTail.next = p;
                                    hiTail = p;
                                    ++hc;
                                }
                            }
                            // Untreeify halves that became small; reuse the
                            // original bin when the other half is empty.
                            ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                                (hc != 0) ? new TreeBin<K,V>(lo) : t;
                            hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                                (lc != 0) ? new TreeBin<K,V>(hi) : t;
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);
                            advance = true;
                        }
                    }
                }
            }
        }
    }
2458
2459 /* ---------------- Conversion from/to TreeBins -------------- */
2460
    /**
     * Replaces all linked nodes in bin at given index unless table is
     * too small, in which case resizes instead.
     */
    private final void treeifyBin(Node<K,V>[] tab, int index) {
        Node<K,V> b; int n, sc;
        if (tab != null) {
            if ((n = tab.length) < MIN_TREEIFY_CAPACITY) {
                // Prefer growing the table over treeifying a small one.
                if (tab == table && (sc = sizeCtl) >= 0 &&
                    U.compareAndSwapInt(this, SIZECTL, sc, -2))
                    transfer(tab, null);
            }
            else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
                synchronized (b) {
                    if (tabAt(tab, index) == b) {  // recheck head under lock
                        // Copy the list into doubly-linked TreeNodes, then
                        // install a TreeBin built from them.
                        TreeNode<K,V> hd = null, tl = null;
                        for (Node<K,V> e = b; e != null; e = e.next) {
                            TreeNode<K,V> p =
                                new TreeNode<K,V>(e.hash, e.key, e.val,
                                                  null, null);
                            if ((p.prev = tl) == null)
                                hd = p;
                            else
                                tl.next = p;
                            tl = p;
                        }
                        setTabAt(tab, index, new TreeBin<K,V>(hd));
                    }
                }
            }
        }
    }
2493
2494 /**
2495 * Returns a list on non-TreeNodes replacing those in given list.
2496 */
2497 static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2498 Node<K,V> hd = null, tl = null;
2499 for (Node<K,V> q = b; q != null; q = q.next) {
2500 Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
2501 if (tl == null)
2502 hd = p;
2503 else
2504 tl.next = p;
2505 tl = p;
2506 }
2507 return hd;
2508 }
2509
2510 /* ---------------- TreeNodes -------------- */
2511
    /**
     * Nodes for use in TreeBins
     */
    static final class TreeNode<K,V> extends Node<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;

        TreeNode(int hash, K key, V val, Node<K,V> next,
                 TreeNode<K,V> parent) {
            super(hash, key, val, next);
            this.parent = parent;
        }

        Node<K,V> find(int h, Object k) {
            return findTreeNode(h, k, null);
        }

        /**
         * Returns the TreeNode (or null if not found) for the given key
         * starting at given root.
         */
        final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
            if (k != null) {
                TreeNode<K,V> p = this;
                do {
                    int ph, dir; K pk; TreeNode<K,V> q;
                    TreeNode<K,V> pl = p.left, pr = p.right;
                    if ((ph = p.hash) > h)          // order first by hash
                        p = pl;
                    else if (ph < h)
                        p = pr;
                    else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                        return p;                   // exact match
                    else if (pl == null)            // only one subtree to try
                        p = pr;
                    else if (pr == null)
                        p = pl;
                    else if ((kc != null ||
                              (kc = comparableClassFor(k)) != null) &&
                             (dir = compareComparables(kc, k, pk)) != 0)
                        p = (dir < 0) ? pl : pr;    // use Comparable order
                    // Untie-breakable equal hashes: search the right subtree
                    // recursively, then continue down the left.
                    else if ((q = pr.findTreeNode(h, k, kc)) != null)
                        return q;
                    else
                        p = pl;
                } while (p != null);
            }
            return null;
        }
    }
2565
2566 /* ---------------- TreeBins -------------- */
2567
2568 /**
2569 * TreeNodes used at the heads of bins. TreeBins do not hold user
2570 * keys or values, but instead point to list of TreeNodes and
2571 * their root. They also maintain a parasitic read-write lock
2572 * forcing writers (who hold bin lock) to wait for readers (who do
2573 * not) to complete before tree restructuring operations.
2574 */
2575 static final class TreeBin<K,V> extends Node<K,V> {
2576 TreeNode<K,V> root;
2577 volatile TreeNode<K,V> first;
2578 volatile Thread waiter;
2579 volatile int lockState;
2580 // values for lockState
2581 static final int WRITER = 1; // set while holding write lock
2582 static final int WAITER = 2; // set when waiting for write lock
2583 static final int READER = 4; // increment value for setting read lock
2584
2585 /**
2586 * Tie-breaking utility for ordering insertions when equal
2587 * hashCodes and non-comparable. We don't require a total
2588 * order, just a consistent insertion rule to maintain
2589 * equivalence across rebalancings. Tie-breaking further than
2590 * necessary simplifies testing a bit.
2591 */
2592 static int tieBreakOrder(Object a, Object b) {
2593 int d;
2594 if (a == null || b == null ||
2595 (d = a.getClass().getName().
2596 compareTo(b.getClass().getName())) == 0)
2597 d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
2598 -1 : 1);
2599 return d;
2600 }
2601
        /**
         * Creates bin with initial set of nodes headed by b.
         */
        TreeBin(TreeNode<K,V> b) {
            super(TREEBIN, null, null, null);  // TREEBIN hash marks tree head
            this.first = b;
            TreeNode<K,V> r = null;            // root of the tree being built
            for (TreeNode<K,V> x = b, next; x != null; x = next) {
                next = (TreeNode<K,V>)x.next;
                x.left = x.right = null;
                if (r == null) {               // first node becomes the root
                    x.parent = null;
                    x.red = false;
                    r = x;
                }
                else {
                    // Standard red-black insertion: descend by hash, then
                    // Comparable order, then tieBreakOrder on full ties.
                    K k = x.key;
                    int h = x.hash;
                    Class<?> kc = null;
                    for (TreeNode<K,V> p = r;;) {
                        int dir, ph;
                        K pk = p.key;
                        if ((ph = p.hash) > h)
                            dir = -1;
                        else if (ph < h)
                            dir = 1;
                        else if ((kc == null &&
                                  (kc = comparableClassFor(k)) == null) ||
                                 (dir = compareComparables(kc, k, pk)) == 0)
                            dir = tieBreakOrder(k, pk);
                        TreeNode<K,V> xp = p;
                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
                            x.parent = xp;
                            if (dir <= 0)
                                xp.left = x;
                            else
                                xp.right = x;
                            r = balanceInsertion(r, x);
                            break;
                        }
                    }
                }
            }
            this.root = r;
            assert checkInvariants(root);
        }
2648
        /**
         * Acquires write lock for tree restructuring.
         */
        private final void lockRoot() {
            // Fast path: CAS lockState 0 -> WRITER; otherwise take the
            // slow path that handles readers and waiting.
            if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
                contendedLock(); // offload to separate method
        }
2656
        /**
         * Releases write lock for tree restructuring.
         */
        private final void unlockRoot() {
            // Plain volatile write suffices: only the writer can hold it.
            lockState = 0;
        }
2663
        /**
         * Possibly blocks awaiting root lock.
         */
        private final void contendedLock() {
            boolean waiting = false;
            for (int s;;) {
                if (((s = lockState) & WRITER) == 0) {
                    // No writer: try to take the lock (readers, if any,
                    // are folded into s and carried along by the CAS).
                    if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
                        if (waiting)
                            waiter = null;  // clear our waiter registration
                        return;
                    }
                }
                else if ((s & WAITER) == 0) {
                    // Register as the (single) waiter before parking.
                    if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
                        waiting = true;
                        waiter = Thread.currentThread();
                    }
                }
                else if (waiting)
                    LockSupport.park(this);  // unparked by the last reader
            }
        }
2687
        /**
         * Returns matching node or null if none. Tries to search
         * using tree comparisons from root, but continues linear
         * search when lock not available.
         */
        final Node<K,V> find(int h, Object k) {
            if (k != null) {
                for (Node<K,V> e = first; e != null; e = e.next) {
                    int s; K ek;
                    if (((s = lockState) & (WAITER|WRITER)) != 0) {
                        // A writer holds or awaits the lock: step along the
                        // linked "next" fields instead, which stay valid
                        // during tree restructuring.
                        if (e.hash == h &&
                            ((ek = e.key) == k || (ek != null && k.equals(ek))))
                            return e;
                    }
                    else if (U.compareAndSwapInt(this, LOCKSTATE, s,
                                                 s + READER)) {
                        // Acquired a shared read lock: do the tree search.
                        TreeNode<K,V> r, p;
                        try {
                            p = ((r = root) == null ? null :
                                 r.findTreeNode(h, k, null));
                        } finally {
                            Thread w;
                            int ls;
                            // Release the read lock; if we were the last
                            // reader a waiting writer is parked on, wake it.
                            do {} while (!U.compareAndSwapInt
                                         (this, LOCKSTATE,
                                          ls = lockState, ls - READER));
                            if (ls == (READER|WAITER) && (w = waiter) != null)
                                LockSupport.unpark(w);
                        }
                        return p;
                    }
                }
            }
            return null;
        }
2723
2724 /**
2725 * Finds or adds a node.
2726 * @return null if added
2727 */
        final TreeNode<K,V> putTreeVal(int h, K k, V v) {
            Class<?> kc = null;          // cached comparable class of k, if any
            boolean searched = false;    // whole-subtree scan done at most once
            for (TreeNode<K,V> p = root;;) {
                int dir, ph; K pk;
                if (p == null) {
                    // Empty tree: new node becomes both list head and root.
                    first = root = new TreeNode<K,V>(h, k, v, null, null);
                    break;
                }
                else if ((ph = p.hash) > h)
                    dir = -1;
                else if (ph < h)
                    dir = 1;
                else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                    return p;            // key already present: caller updates value
                else if ((kc == null &&
                          (kc = comparableClassFor(k)) == null) ||
                         (dir = compareComparables(kc, k, pk)) == 0) {
                    // Equal hashes and incomparable (or compare-equal) keys:
                    // before falling back to the arbitrary tie-break order,
                    // scan both subtrees once for an equal key.
                    if (!searched) {
                        TreeNode<K,V> q, ch;
                        searched = true;
                        if (((ch = p.left) != null &&
                             (q = ch.findTreeNode(h, k, kc)) != null) ||
                            ((ch = p.right) != null &&
                             (q = ch.findTreeNode(h, k, kc)) != null))
                            return q;
                    }
                    dir = tieBreakOrder(k, pk);
                }

                TreeNode<K,V> xp = p;
                if ((p = (dir <= 0) ? p.left : p.right) == null) {
                    // Found insertion point: link new node at the head of the
                    // traversal list and as xp's child.
                    TreeNode<K,V> x, f = first;
                    first = x = new TreeNode<K,V>(h, k, v, f, xp);
                    if (f != null)
                        f.prev = x;
                    if (dir <= 0)
                        xp.left = x;
                    else
                        xp.right = x;
                    if (!xp.red)
                        x.red = true;    // parent black: coloring x red keeps invariants
                    else {
                        // Parent red: rebalancing will restructure the tree,
                        // so exclude tree readers while it runs.
                        lockRoot();
                        try {
                            root = balanceInsertion(root, x);
                        } finally {
                            unlockRoot();
                        }
                    }
                    break;
                }
            }
            assert checkInvariants(root);
            return null;
        }
2784
2785 /**
2786 * Removes the given node, that must be present before this
2787 * call. This is messier than typical red-black deletion code
2788 * because we cannot swap the contents of an interior node
2789 * with a leaf successor that is pinned by "next" pointers
2790 * that are accessible independently of lock. So instead we
2791 * swap the tree linkages.
2792 *
2793 * @return true if now too small, so should be untreeified
2794 */
        final boolean removeTreeNode(TreeNode<K,V> p) {
            TreeNode<K,V> next = (TreeNode<K,V>)p.next;
            TreeNode<K,V> pred = p.prev;  // unlink traversal pointers
            TreeNode<K,V> r, rl;
            // First splice p out of the prev/next list; no tree lock needed
            // because list readers tolerate concurrent list updates.
            if (pred == null)
                first = next;
            else
                pred.next = next;
            if (next != null)
                next.prev = pred;
            if (first == null) {
                // Bin now empty.
                root = null;
                return true;
            }
            // Cheap shape test: a tree this shallow holds few enough nodes
            // that the caller should untreeify, so skip RB deletion entirely.
            if ((r = root) == null || r.right == null || // too small
                (rl = r.left) == null || rl.left == null)
                return true;
            lockRoot();
            try {
                TreeNode<K,V> replacement;
                TreeNode<K,V> pl = p.left;
                TreeNode<K,V> pr = p.right;
                if (pl != null && pr != null) {
                    // Two children: swap p with its in-order successor s by
                    // exchanging tree linkages (not contents -- the list
                    // pointers pin node identity).
                    TreeNode<K,V> s = pr, sl;
                    while ((sl = s.left) != null) // find successor
                        s = sl;
                    boolean c = s.red; s.red = p.red; p.red = c; // swap colors
                    TreeNode<K,V> sr = s.right;
                    TreeNode<K,V> pp = p.parent;
                    if (s == pr) { // p was s's direct parent
                        p.parent = s;
                        s.right = p;
                    }
                    else {
                        TreeNode<K,V> sp = s.parent;
                        if ((p.parent = sp) != null) {
                            if (s == sp.left)
                                sp.left = p;
                            else
                                sp.right = p;
                        }
                        if ((s.right = pr) != null)
                            pr.parent = s;
                    }
                    p.left = null;
                    if ((p.right = sr) != null)
                        sr.parent = p;
                    if ((s.left = pl) != null)
                        pl.parent = s;
                    if ((s.parent = pp) == null)
                        r = s;
                    else if (p == pp.left)
                        pp.left = s;
                    else
                        pp.right = s;
                    // After the swap, p sits at the successor's old spot with
                    // at most a right child sr; that child (or p itself if
                    // none) is the node to rebalance around.
                    if (sr != null)
                        replacement = sr;
                    else
                        replacement = p;
                }
                else if (pl != null)
                    replacement = pl;
                else if (pr != null)
                    replacement = pr;
                else
                    replacement = p;  // leaf: detach after rebalancing
                if (replacement != p) {
                    TreeNode<K,V> pp = replacement.parent = p.parent;
                    if (pp == null)
                        r = replacement;
                    else if (p == pp.left)
                        pp.left = replacement;
                    else
                        pp.right = replacement;
                    p.left = p.right = p.parent = null;
                }

                // Removing a red node cannot violate RB invariants.
                root = (p.red) ? r : balanceDeletion(r, replacement);

                if (p == replacement) {  // detach pointers
                    TreeNode<K,V> pp;
                    if ((pp = p.parent) != null) {
                        if (p == pp.left)
                            pp.left = null;
                        else if (p == pp.right)
                            pp.right = null;
                        p.parent = null;
                    }
                }
            } finally {
                unlockRoot();
            }
            assert checkInvariants(root);
            return false;
        }
2890
2891 /* ------------------------------------------------------------ */
2892 // Red-black tree methods, all adapted from CLR
2893
2894 static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
2895 TreeNode<K,V> p) {
2896 TreeNode<K,V> r, pp, rl;
2897 if (p != null && (r = p.right) != null) {
2898 if ((rl = p.right = r.left) != null)
2899 rl.parent = p;
2900 if ((pp = r.parent = p.parent) == null)
2901 (root = r).red = false;
2902 else if (pp.left == p)
2903 pp.left = r;
2904 else
2905 pp.right = r;
2906 r.left = p;
2907 p.parent = r;
2908 }
2909 return root;
2910 }
2911
2912 static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
2913 TreeNode<K,V> p) {
2914 TreeNode<K,V> l, pp, lr;
2915 if (p != null && (l = p.left) != null) {
2916 if ((lr = p.left = l.right) != null)
2917 lr.parent = p;
2918 if ((pp = l.parent = p.parent) == null)
2919 (root = l).red = false;
2920 else if (pp.right == p)
2921 pp.right = l;
2922 else
2923 pp.left = l;
2924 l.right = p;
2925 p.parent = l;
2926 }
2927 return root;
2928 }
2929
        /**
         * Restores red-black invariants after inserting x (adapted from
         * CLR).  Returns the possibly-new root.
         */
        static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
                                                    TreeNode<K,V> x) {
            x.red = true;  // new nodes start red
            for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
                if ((xp = x.parent) == null) {
                    // x is the root: color it black and finish.
                    x.red = false;
                    return x;
                }
                else if (!xp.red || (xpp = xp.parent) == null)
                    return root;   // black parent: no violation remains
                if (xp == (xppl = xpp.left)) {
                    // Parent is a left child.
                    if ((xppr = xpp.right) != null && xppr.red) {
                        // Case 1: red uncle -- recolor and move up.
                        xppr.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.right) {
                            // Case 2: inner child -- rotate to outer form.
                            root = rotateLeft(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            // Case 3: recolor and rotate grandparent.
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateRight(root, xpp);
                            }
                        }
                    }
                }
                else {
                    // Mirror image: parent is a right child.
                    if (xppl != null && xppl.red) {
                        xppl.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.left) {
                            root = rotateRight(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateLeft(root, xpp);
                            }
                        }
                    }
                }
            }
        }
2984
        /**
         * Restores red-black invariants after unlinking a black node, where
         * x is the replacement node (adapted from CLR).  Returns the
         * possibly-new root.
         */
        static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
                                                   TreeNode<K,V> x) {
            for (TreeNode<K,V> xp, xpl, xpr;;) {
                if (x == null || x == root)
                    return root;
                else if ((xp = x.parent) == null) {
                    // x became the root: color black and finish.
                    x.red = false;
                    return x;
                }
                else if (x.red) {
                    // A red replacement absorbs the missing black height.
                    x.red = false;
                    return root;
                }
                else if ((xpl = xp.left) == x) {
                    // x is a left child; sibling is xpr.
                    if ((xpr = xp.right) != null && xpr.red) {
                        // Red sibling: rotate to get a black sibling.
                        xpr.red = false;
                        xp.red = true;
                        root = rotateLeft(root, xp);
                        xpr = (xp = x.parent) == null ? null : xp.right;
                    }
                    if (xpr == null)
                        x = xp;  // no sibling: propagate deficit upward
                    else {
                        TreeNode<K,V> sl = xpr.left, sr = xpr.right;
                        if ((sr == null || !sr.red) &&
                            (sl == null || !sl.red)) {
                            // Black sibling with black children: recolor
                            // and move the deficit up.
                            xpr.red = true;
                            x = xp;
                        }
                        else {
                            if (sr == null || !sr.red) {
                                // Near child red, far child black: rotate
                                // sibling to make the far child red.
                                if (sl != null)
                                    sl.red = false;
                                xpr.red = true;
                                root = rotateRight(root, xpr);
                                xpr = (xp = x.parent) == null ?
                                    null : xp.right;
                            }
                            // Far child red: recolor and rotate parent;
                            // tree is balanced afterwards.
                            if (xpr != null) {
                                xpr.red = (xp == null) ? false : xp.red;
                                if ((sr = xpr.right) != null)
                                    sr.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateLeft(root, xp);
                            }
                            x = root;  // done: exit on next iteration
                        }
                    }
                }
                else { // symmetric
                    if (xpl != null && xpl.red) {
                        xpl.red = false;
                        xp.red = true;
                        root = rotateRight(root, xp);
                        xpl = (xp = x.parent) == null ? null : xp.left;
                    }
                    if (xpl == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpl.left, sr = xpl.right;
                        if ((sl == null || !sl.red) &&
                            (sr == null || !sr.red)) {
                            xpl.red = true;
                            x = xp;
                        }
                        else {
                            if (sl == null || !sl.red) {
                                if (sr != null)
                                    sr.red = false;
                                xpl.red = true;
                                root = rotateLeft(root, xpl);
                                xpl = (xp = x.parent) == null ?
                                    null : xp.left;
                            }
                            if (xpl != null) {
                                xpl.red = (xp == null) ? false : xp.red;
                                if ((sl = xpl.left) != null)
                                    sl.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateRight(root, xp);
                            }
                            x = root;
                        }
                    }
                }
            }
        }
3076
3077 /**
3078 * Recursive invariant check
3079 */
        static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
            TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
                tb = t.prev, tn = (TreeNode<K,V>)t.next;
            if (tb != null && tb.next != t)
                return false;   // broken prev->next linkage
            if (tn != null && tn.prev != t)
                return false;   // broken next->prev linkage
            if (tp != null && t != tp.left && t != tp.right)
                return false;   // t is not a child of its parent
            if (tl != null && (tl.parent != t || tl.hash > t.hash))
                return false;   // left child mislinked or hash out of order
            if (tr != null && (tr.parent != t || tr.hash < t.hash))
                return false;   // right child mislinked or hash out of order
            if (t.red && tl != null && tl.red && tr != null && tr.red)
                return false;   // red node with two red children
            if (tl != null && !checkInvariants(tl))
                return false;   // recurse left
            if (tr != null && !checkInvariants(tr))
                return false;   // recurse right
            return true;
        }
3101
        // Unsafe mechanics: LOCKSTATE is the field offset of lockState,
        // used for the CAS-based read/write lock above.
        private static final sun.misc.Unsafe U;
        private static final long LOCKSTATE;
        static {
            try {
                U = getUnsafe();
                Class<?> k = TreeBin.class;
                LOCKSTATE = U.objectFieldOffset
                    (k.getDeclaredField("lockState"));
            } catch (Exception e) {
                // Failure to resolve Unsafe/field offsets is unrecoverable
                // at class-initialization time.
                throw new Error(e);
            }
        }
3114 }
3115
3116 /* ----------------Table Traversal -------------- */
3117
3118 /**
3119 * Records the table, its length, and current traversal index for a
3120 * traverser that must process a region of a forwarded table before
3121 * proceeding with current table.
3122 */
    static final class TableStack<K,V> {
        int length;             // length of the saved table
        int index;              // index to resume at within the saved table
        Node<K,V>[] tab;        // the saved (pre-forwarding) table
        TableStack<K,V> next;   // link to the next (older) saved frame
    }
3129
3130 /**
3131 * Encapsulates traversal for methods such as containsValue; also
3132 * serves as a base class for other iterators and spliterators.
3133 *
3134 * Method advance visits once each still-valid node that was
3135 * reachable upon iterator construction. It might miss some that
3136 * were added to a bin after the bin was visited, which is OK wrt
3137 * consistency guarantees. Maintaining this property in the face
3138 * of possible ongoing resizes requires a fair amount of
3139 * bookkeeping state that is difficult to optimize away amidst
3140 * volatile accesses. Even so, traversal maintains reasonable
3141 * throughput.
3142 *
3143 * Normally, iteration proceeds bin-by-bin traversing lists.
3144 * However, if the table has been resized, then all future steps
3145 * must traverse both the bin at the current index as well as at
3146 * (index + baseSize); and so on for further resizings. To
3147 * paranoically cope with potential sharing by users of iterators
3148 * across threads, iteration terminates if a bounds checks fails
3149 * for a table read.
3150 */
    static class Traverser<K,V> {
        Node<K,V>[] tab;        // current table; updated if resized
        Node<K,V> next;         // the next entry to use
        TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
        int index;              // index of bin to use next
        int baseIndex;          // current index of initial table
        int baseLimit;          // index bound for initial table
        final int baseSize;     // initial table size

        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
            this.tab = tab;
            this.baseSize = size;
            this.baseIndex = this.index = index;
            this.baseLimit = limit;
            this.next = null;
        }

        /**
         * Advances if possible, returning next valid node, or null if none.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;     // continue within the current bin's list
            for (;;) {
                Node<K,V>[] t; int i, n; // must use locals in checks
                if (e != null)
                    return next = e;
                // Terminate past the limit, if the table is gone, or if the
                // index is out of bounds (possible when an iterator is
                // shared across threads -- see class comment).
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                if ((e = tabAt(t, i)) != null && e.hash < 0) {
                    // Negative hash marks a special (non-data) node.
                    if (e instanceof ForwardingNode) {
                        // Bin moved during a resize: descend into the new
                        // table, remembering where to come back to.
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        pushState(t, i, n);
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first; // iterate via list links
                    else
                        e = null;   // other special node: no elements here
                }
                if (stack != null)
                    recoverState(n);    // possibly pop back to a saved table
                else if ((index = i + baseSize) >= n)
                    index = ++baseIndex; // visit upper slots if present
            }
        }

        /**
         * Saves traversal state upon encountering a forwarding node.
         */
        private void pushState(Node<K,V>[] t, int i, int n) {
            TableStack<K,V> s = spare;  // reuse if possible
            if (s != null)
                spare = s.next;
            else
                s = new TableStack<K,V>();
            s.tab = t;
            s.length = n;
            s.index = i;
            s.next = stack;
            stack = s;
        }

        /**
         * Possibly pops traversal state.
         *
         * @param n length of current table
         */
        private void recoverState(int n) {
            TableStack<K,V> s; int len;
            // Pop frames whose region in the current table is exhausted
            // (advancing index past the current table's length).
            while ((s = stack) != null && (index += (len = s.length)) >= n) {
                n = len;
                index = s.index;
                tab = s.tab;
                s.tab = null;
                TableStack<K,V> next = s.next;
                s.next = spare; // save for reuse
                stack = next;
                spare = s;
            }
            // Fully unwound: move to the next upper slot or next base bin.
            if (s == null && (index += baseSize) >= n)
                index = ++baseIndex;
        }
    }
3238
3239 /**
3240 * Base of key, value, and entry Iterators. Adds fields to
3241 * Traverser to support iterator.remove.
3242 */
    static class BaseIterator<K,V> extends Traverser<K,V> {
        final ConcurrentHashMapV8<K,V> map;
        Node<K,V> lastReturned;  // node from the most recent next(), for remove()
        BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
                    ConcurrentHashMapV8<K,V> map) {
            super(tab, size, index, limit);
            this.map = map;
            advance();  // pre-position at the first element so hasNext() works
        }

        public final boolean hasNext() { return next != null; }
        public final boolean hasMoreElements() { return next != null; }

        /**
         * Removes the mapping most recently returned by next().
         *
         * @throws IllegalStateException if next() has not been called, or
         *         remove() was already called since the last next()
         */
        public final void remove() {
            Node<K,V> p;
            if ((p = lastReturned) == null)
                throw new IllegalStateException();
            lastReturned = null;
            map.replaceNode(p.key, null, null);
        }
    }
3264
3265 static final class KeyIterator<K,V> extends BaseIterator<K,V>
3266 implements Iterator<K>, Enumeration<K> {
3267 KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
3268 ConcurrentHashMapV8<K,V> map) {
3269 super(tab, index, size, limit, map);
3270 }
3271
3272 public final K next() {
3273 Node<K,V> p;
3274 if ((p = next) == null)
3275 throw new NoSuchElementException();
3276 K k = p.key;
3277 lastReturned = p;
3278 advance();
3279 return k;
3280 }
3281
3282 public final K nextElement() { return next(); }
3283 }
3284
3285 static final class ValueIterator<K,V> extends BaseIterator<K,V>
3286 implements Iterator<V>, Enumeration<V> {
3287 ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
3288 ConcurrentHashMapV8<K,V> map) {
3289 super(tab, index, size, limit, map);
3290 }
3291
3292 public final V next() {
3293 Node<K,V> p;
3294 if ((p = next) == null)
3295 throw new NoSuchElementException();
3296 V v = p.val;
3297 lastReturned = p;
3298 advance();
3299 return v;
3300 }
3301
3302 public final V nextElement() { return next(); }
3303 }
3304
3305 static final class EntryIterator<K,V> extends BaseIterator<K,V>
3306 implements Iterator<Map.Entry<K,V>> {
3307 EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
3308 ConcurrentHashMapV8<K,V> map) {
3309 super(tab, index, size, limit, map);
3310 }
3311
3312 public final Map.Entry<K,V> next() {
3313 Node<K,V> p;
3314 if ((p = next) == null)
3315 throw new NoSuchElementException();
3316 K k = p.key;
3317 V v = p.val;
3318 lastReturned = p;
3319 advance();
3320 return new MapEntry<K,V>(k, v, map);
3321 }
3322 }
3323
3324 /**
3325 * Exported Entry for EntryIterator
3326 */
3327 static final class MapEntry<K,V> implements Map.Entry<K,V> {
3328 final K key; // non-null
3329 V val; // non-null
3330 final ConcurrentHashMapV8<K,V> map;
3331 MapEntry(K key, V val, ConcurrentHashMapV8<K,V> map) {
3332 this.key = key;
3333 this.val = val;
3334 this.map = map;
3335 }
3336 public K getKey() { return key; }
3337 public V getValue() { return val; }
3338 public int hashCode() { return key.hashCode() ^ val.hashCode(); }
3339 public String toString() { return key + "=" + val; }
3340
3341 public boolean equals(Object o) {
3342 Object k, v; Map.Entry<?,?> e;
3343 return ((o instanceof Map.Entry) &&
3344 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3345 (v = e.getValue()) != null &&
3346 (k == key || k.equals(key)) &&
3347 (v == val || v.equals(val)));
3348 }
3349
3350 /**
3351 * Sets our entry's value and writes through to the map. The
3352 * value to return is somewhat arbitrary here. Since we do not
3353 * necessarily track asynchronous changes, the most recent
3354 * "previous" value could be different from what we return (or
3355 * could even have been removed, in which case the put will
3356 * re-establish). We do not and cannot guarantee more.
3357 */
3358 public V setValue(V value) {
3359 if (value == null) throw new NullPointerException();
3360 V v = val;
3361 val = value;
3362 map.put(key, value);
3363 return v;
3364 }
3365 }
3366
    static final class KeySpliterator<K,V> extends Traverser<K,V>
        implements ConcurrentHashMapSpliterator<K> {
        long est;               // size estimate
        KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
                       long est) {
            super(tab, size, index, limit);
            this.est = est;
        }

        /**
         * Splits off the upper half of the remaining bin range, or returns
         * null if too small to split.  Note the side effects: this
         * spliterator's baseLimit shrinks to the midpoint and est is halved
         * for both halves.
         */
        public ConcurrentHashMapSpliterator<K> trySplit() {
            int i, f, h;
            // (i + f) >>> 1 computes the midpoint without int overflow.
            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
                new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
                                        f, est >>>= 1);
        }

        public void forEachRemaining(Action<? super K> action) {
            if (action == null) throw new NullPointerException();
            for (Node<K,V> p; (p = advance()) != null;)
                action.apply(p.key);
        }

        public boolean tryAdvance(Action<? super K> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V> p;
            if ((p = advance()) == null)
                return false;
            action.apply(p.key);
            return true;
        }

        public long estimateSize() { return est; }

    }
3401
    static final class ValueSpliterator<K,V> extends Traverser<K,V>
        implements ConcurrentHashMapSpliterator<V> {
        long est;               // size estimate
        ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
                         long est) {
            super(tab, size, index, limit);
            this.est = est;
        }

        /**
         * Splits off the upper half of the remaining bin range, or returns
         * null if too small to split.  Side effects: baseLimit shrinks to
         * the midpoint and est is halved for both halves.
         */
        public ConcurrentHashMapSpliterator<V> trySplit() {
            int i, f, h;
            // (i + f) >>> 1 computes the midpoint without int overflow.
            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
                new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
                                          f, est >>>= 1);
        }

        public void forEachRemaining(Action<? super V> action) {
            if (action == null) throw new NullPointerException();
            for (Node<K,V> p; (p = advance()) != null;)
                action.apply(p.val);
        }

        public boolean tryAdvance(Action<? super V> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V> p;
            if ((p = advance()) == null)
                return false;
            action.apply(p.val);
            return true;
        }

        public long estimateSize() { return est; }

    }
3436
    static final class EntrySpliterator<K,V> extends Traverser<K,V>
        implements ConcurrentHashMapSpliterator<Map.Entry<K,V>> {
        final ConcurrentHashMapV8<K,V> map; // To export MapEntry
        long est;               // size estimate
        EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
                         long est, ConcurrentHashMapV8<K,V> map) {
            super(tab, size, index, limit);
            this.map = map;
            this.est = est;
        }

        /**
         * Splits off the upper half of the remaining bin range, or returns
         * null if too small to split.  Side effects: baseLimit shrinks to
         * the midpoint and est is halved for both halves.
         */
        public ConcurrentHashMapSpliterator<Map.Entry<K,V>> trySplit() {
            int i, f, h;
            // (i + f) >>> 1 computes the midpoint without int overflow.
            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
                new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
                                          f, est >>>= 1, map);
        }

        public void forEachRemaining(Action<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            for (Node<K,V> p; (p = advance()) != null; )
                action.apply(new MapEntry<K,V>(p.key, p.val, map));
        }

        public boolean tryAdvance(Action<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V> p;
            if ((p = advance()) == null)
                return false;
            action.apply(new MapEntry<K,V>(p.key, p.val, map));
            return true;
        }

        public long estimateSize() { return est; }

    }
3473
3474 // Parallel bulk operations
3475
3476 /**
3477 * Computes initial batch value for bulk tasks. The returned value
3478 * is approximately exp2 of the number of times (minus one) to
3479 * split task by two before executing leaf action. This value is
3480 * faster to compute and more convenient to use as a guide to
3481 * splitting than is the depth, since it is used while dividing by
3482 * two anyway.
3483 */
3484 final int batchFor(long b) {
3485 long n;
3486 if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3487 return 0;
3488 int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3489 return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3490 }
3491
3492 /**
3493 * Performs the given action for each (key, value).
3494 *
3495 * @param parallelismThreshold the (estimated) number of elements
3496 * needed for this operation to be executed in parallel
3497 * @param action the action
3498 * @since 1.8
3499 */
3500 public void forEach(long parallelismThreshold,
3501 BiAction<? super K,? super V> action) {
3502 if (action == null) throw new NullPointerException();
3503 new ForEachMappingTask<K,V>
3504 (null, batchFor(parallelismThreshold), 0, 0, table,
3505 action).invoke();
3506 }
3507
3508 /**
3509 * Performs the given action for each non-null transformation
3510 * of each (key, value).
3511 *
3512 * @param parallelismThreshold the (estimated) number of elements
3513 * needed for this operation to be executed in parallel
3514 * @param transformer a function returning the transformation
3515 * for an element, or null if there is no transformation (in
3516 * which case the action is not applied)
3517 * @param action the action
3518 * @since 1.8
3519 */
3520 public <U> void forEach(long parallelismThreshold,
3521 BiFun<? super K, ? super V, ? extends U> transformer,
3522 Action<? super U> action) {
3523 if (transformer == null || action == null)
3524 throw new NullPointerException();
3525 new ForEachTransformedMappingTask<K,V,U>
3526 (null, batchFor(parallelismThreshold), 0, 0, table,
3527 transformer, action).invoke();
3528 }
3529
3530 /**
3531 * Returns a non-null result from applying the given search
3532 * function on each (key, value), or null if none. Upon
3533 * success, further element processing is suppressed and the
3534 * results of any other parallel invocations of the search
3535 * function are ignored.
3536 *
3537 * @param parallelismThreshold the (estimated) number of elements
3538 * needed for this operation to be executed in parallel
3539 * @param searchFunction a function returning a non-null
3540 * result on success, else null
3541 * @return a non-null result from applying the given search
3542 * function on each (key, value), or null if none
3543 * @since 1.8
3544 */
3545 public <U> U search(long parallelismThreshold,
3546 BiFun<? super K, ? super V, ? extends U> searchFunction) {
3547 if (searchFunction == null) throw new NullPointerException();
3548 return new SearchMappingsTask<K,V,U>
3549 (null, batchFor(parallelismThreshold), 0, 0, table,
3550 searchFunction, new AtomicReference<U>()).invoke();
3551 }
3552
3553 /**
3554 * Returns the result of accumulating the given transformation
3555 * of all (key, value) pairs using the given reducer to
3556 * combine values, or null if none.
3557 *
3558 * @param parallelismThreshold the (estimated) number of elements
3559 * needed for this operation to be executed in parallel
3560 * @param transformer a function returning the transformation
3561 * for an element, or null if there is no transformation (in
3562 * which case it is not combined)
3563 * @param reducer a commutative associative combining function
3564 * @return the result of accumulating the given transformation
3565 * of all (key, value) pairs
3566 * @since 1.8
3567 */
3568 public <U> U reduce(long parallelismThreshold,
3569 BiFun<? super K, ? super V, ? extends U> transformer,
3570 BiFun<? super U, ? super U, ? extends U> reducer) {
3571 if (transformer == null || reducer == null)
3572 throw new NullPointerException();
3573 return new MapReduceMappingsTask<K,V,U>
3574 (null, batchFor(parallelismThreshold), 0, 0, table,
3575 null, transformer, reducer).invoke();
3576 }
3577
3578 /**
3579 * Returns the result of accumulating the given transformation
3580 * of all (key, value) pairs using the given reducer to
3581 * combine values, and the given basis as an identity value.
3582 *
3583 * @param parallelismThreshold the (estimated) number of elements
3584 * needed for this operation to be executed in parallel
3585 * @param transformer a function returning the transformation
3586 * for an element
3587 * @param basis the identity (initial default value) for the reduction
3588 * @param reducer a commutative associative combining function
3589 * @return the result of accumulating the given transformation
3590 * of all (key, value) pairs
3591 * @since 1.8
3592 */
3593 public double reduceToDouble(long parallelismThreshold,
3594 ObjectByObjectToDouble<? super K, ? super V> transformer,
3595 double basis,
3596 DoubleByDoubleToDouble reducer) {
3597 if (transformer == null || reducer == null)
3598 throw new NullPointerException();
3599 return new MapReduceMappingsToDoubleTask<K,V>
3600 (null, batchFor(parallelismThreshold), 0, 0, table,
3601 null, transformer, basis, reducer).invoke();
3602 }
3603
3604 /**
3605 * Returns the result of accumulating the given transformation
3606 * of all (key, value) pairs using the given reducer to
3607 * combine values, and the given basis as an identity value.
3608 *
3609 * @param parallelismThreshold the (estimated) number of elements
3610 * needed for this operation to be executed in parallel
3611 * @param transformer a function returning the transformation
3612 * for an element
3613 * @param basis the identity (initial default value) for the reduction
3614 * @param reducer a commutative associative combining function
3615 * @return the result of accumulating the given transformation
3616 * of all (key, value) pairs
3617 * @since 1.8
3618 */
3619 public long reduceToLong(long parallelismThreshold,
3620 ObjectByObjectToLong<? super K, ? super V> transformer,
3621 long basis,
3622 LongByLongToLong reducer) {
3623 if (transformer == null || reducer == null)
3624 throw new NullPointerException();
3625 return new MapReduceMappingsToLongTask<K,V>
3626 (null, batchFor(parallelismThreshold), 0, 0, table,
3627 null, transformer, basis, reducer).invoke();
3628 }
3629
3630 /**
3631 * Returns the result of accumulating the given transformation
3632 * of all (key, value) pairs using the given reducer to
3633 * combine values, and the given basis as an identity value.
3634 *
3635 * @param parallelismThreshold the (estimated) number of elements
3636 * needed for this operation to be executed in parallel
3637 * @param transformer a function returning the transformation
3638 * for an element
3639 * @param basis the identity (initial default value) for the reduction
3640 * @param reducer a commutative associative combining function
3641 * @return the result of accumulating the given transformation
3642 * of all (key, value) pairs
3643 * @since 1.8
3644 */
3645 public int reduceToInt(long parallelismThreshold,
3646 ObjectByObjectToInt<? super K, ? super V> transformer,
3647 int basis,
3648 IntByIntToInt reducer) {
3649 if (transformer == null || reducer == null)
3650 throw new NullPointerException();
3651 return new MapReduceMappingsToIntTask<K,V>
3652 (null, batchFor(parallelismThreshold), 0, 0, table,
3653 null, transformer, basis, reducer).invoke();
3654 }
3655
3656 /**
3657 * Performs the given action for each key.
3658 *
3659 * @param parallelismThreshold the (estimated) number of elements
3660 * needed for this operation to be executed in parallel
3661 * @param action the action
3662 * @since 1.8
3663 */
3664 public void forEachKey(long parallelismThreshold,
3665 Action<? super K> action) {
3666 if (action == null) throw new NullPointerException();
3667 new ForEachKeyTask<K,V>
3668 (null, batchFor(parallelismThreshold), 0, 0, table,
3669 action).invoke();
3670 }
3671
3672 /**
3673 * Performs the given action for each non-null transformation
3674 * of each key.
3675 *
3676 * @param parallelismThreshold the (estimated) number of elements
3677 * needed for this operation to be executed in parallel
3678 * @param transformer a function returning the transformation
3679 * for an element, or null if there is no transformation (in
3680 * which case the action is not applied)
3681 * @param action the action
3682 * @since 1.8
3683 */
3684 public <U> void forEachKey(long parallelismThreshold,
3685 Fun<? super K, ? extends U> transformer,
3686 Action<? super U> action) {
3687 if (transformer == null || action == null)
3688 throw new NullPointerException();
3689 new ForEachTransformedKeyTask<K,V,U>
3690 (null, batchFor(parallelismThreshold), 0, 0, table,
3691 transformer, action).invoke();
3692 }
3693
3694 /**
3695 * Returns a non-null result from applying the given search
3696 * function on each key, or null if none. Upon success,
3697 * further element processing is suppressed and the results of
3698 * any other parallel invocations of the search function are
3699 * ignored.
3700 *
3701 * @param parallelismThreshold the (estimated) number of elements
3702 * needed for this operation to be executed in parallel
3703 * @param searchFunction a function returning a non-null
3704 * result on success, else null
3705 * @return a non-null result from applying the given search
3706 * function on each key, or null if none
3707 * @since 1.8
3708 */
3709 public <U> U searchKeys(long parallelismThreshold,
3710 Fun<? super K, ? extends U> searchFunction) {
3711 if (searchFunction == null) throw new NullPointerException();
3712 return new SearchKeysTask<K,V,U>
3713 (null, batchFor(parallelismThreshold), 0, 0, table,
3714 searchFunction, new AtomicReference<U>()).invoke();
3715 }
3716
3717 /**
3718 * Returns the result of accumulating all keys using the given
3719 * reducer to combine values, or null if none.
3720 *
3721 * @param parallelismThreshold the (estimated) number of elements
3722 * needed for this operation to be executed in parallel
3723 * @param reducer a commutative associative combining function
3724 * @return the result of accumulating all keys using the given
3725 * reducer to combine values, or null if none
3726 * @since 1.8
3727 */
3728 public K reduceKeys(long parallelismThreshold,
3729 BiFun<? super K, ? super K, ? extends K> reducer) {
3730 if (reducer == null) throw new NullPointerException();
3731 return new ReduceKeysTask<K,V>
3732 (null, batchFor(parallelismThreshold), 0, 0, table,
3733 null, reducer).invoke();
3734 }
3735
3736 /**
3737 * Returns the result of accumulating the given transformation
3738 * of all keys using the given reducer to combine values, or
3739 * null if none.
3740 *
3741 * @param parallelismThreshold the (estimated) number of elements
3742 * needed for this operation to be executed in parallel
3743 * @param transformer a function returning the transformation
3744 * for an element, or null if there is no transformation (in
3745 * which case it is not combined)
3746 * @param reducer a commutative associative combining function
3747 * @return the result of accumulating the given transformation
3748 * of all keys
3749 * @since 1.8
3750 */
3751 public <U> U reduceKeys(long parallelismThreshold,
3752 Fun<? super K, ? extends U> transformer,
3753 BiFun<? super U, ? super U, ? extends U> reducer) {
3754 if (transformer == null || reducer == null)
3755 throw new NullPointerException();
3756 return new MapReduceKeysTask<K,V,U>
3757 (null, batchFor(parallelismThreshold), 0, 0, table,
3758 null, transformer, reducer).invoke();
3759 }
3760
3761 /**
3762 * Returns the result of accumulating the given transformation
3763 * of all keys using the given reducer to combine values, and
3764 * the given basis as an identity value.
3765 *
3766 * @param parallelismThreshold the (estimated) number of elements
3767 * needed for this operation to be executed in parallel
3768 * @param transformer a function returning the transformation
3769 * for an element
3770 * @param basis the identity (initial default value) for the reduction
3771 * @param reducer a commutative associative combining function
3772 * @return the result of accumulating the given transformation
3773 * of all keys
3774 * @since 1.8
3775 */
3776 public double reduceKeysToDouble(long parallelismThreshold,
3777 ObjectToDouble<? super K> transformer,
3778 double basis,
3779 DoubleByDoubleToDouble reducer) {
3780 if (transformer == null || reducer == null)
3781 throw new NullPointerException();
3782 return new MapReduceKeysToDoubleTask<K,V>
3783 (null, batchFor(parallelismThreshold), 0, 0, table,
3784 null, transformer, basis, reducer).invoke();
3785 }
3786
3787 /**
3788 * Returns the result of accumulating the given transformation
3789 * of all keys using the given reducer to combine values, and
3790 * the given basis as an identity value.
3791 *
3792 * @param parallelismThreshold the (estimated) number of elements
3793 * needed for this operation to be executed in parallel
3794 * @param transformer a function returning the transformation
3795 * for an element
3796 * @param basis the identity (initial default value) for the reduction
3797 * @param reducer a commutative associative combining function
3798 * @return the result of accumulating the given transformation
3799 * of all keys
3800 * @since 1.8
3801 */
3802 public long reduceKeysToLong(long parallelismThreshold,
3803 ObjectToLong<? super K> transformer,
3804 long basis,
3805 LongByLongToLong reducer) {
3806 if (transformer == null || reducer == null)
3807 throw new NullPointerException();
3808 return new MapReduceKeysToLongTask<K,V>
3809 (null, batchFor(parallelismThreshold), 0, 0, table,
3810 null, transformer, basis, reducer).invoke();
3811 }
3812
3813 /**
3814 * Returns the result of accumulating the given transformation
3815 * of all keys using the given reducer to combine values, and
3816 * the given basis as an identity value.
3817 *
3818 * @param parallelismThreshold the (estimated) number of elements
3819 * needed for this operation to be executed in parallel
3820 * @param transformer a function returning the transformation
3821 * for an element
3822 * @param basis the identity (initial default value) for the reduction
3823 * @param reducer a commutative associative combining function
3824 * @return the result of accumulating the given transformation
3825 * of all keys
3826 * @since 1.8
3827 */
3828 public int reduceKeysToInt(long parallelismThreshold,
3829 ObjectToInt<? super K> transformer,
3830 int basis,
3831 IntByIntToInt reducer) {
3832 if (transformer == null || reducer == null)
3833 throw new NullPointerException();
3834 return new MapReduceKeysToIntTask<K,V>
3835 (null, batchFor(parallelismThreshold), 0, 0, table,
3836 null, transformer, basis, reducer).invoke();
3837 }
3838
3839 /**
3840 * Performs the given action for each value.
3841 *
3842 * @param parallelismThreshold the (estimated) number of elements
3843 * needed for this operation to be executed in parallel
3844 * @param action the action
3845 * @since 1.8
3846 */
3847 public void forEachValue(long parallelismThreshold,
3848 Action<? super V> action) {
3849 if (action == null)
3850 throw new NullPointerException();
3851 new ForEachValueTask<K,V>
3852 (null, batchFor(parallelismThreshold), 0, 0, table,
3853 action).invoke();
3854 }
3855
3856 /**
3857 * Performs the given action for each non-null transformation
3858 * of each value.
3859 *
3860 * @param parallelismThreshold the (estimated) number of elements
3861 * needed for this operation to be executed in parallel
3862 * @param transformer a function returning the transformation
3863 * for an element, or null if there is no transformation (in
3864 * which case the action is not applied)
3865 * @param action the action
3866 * @since 1.8
3867 */
3868 public <U> void forEachValue(long parallelismThreshold,
3869 Fun<? super V, ? extends U> transformer,
3870 Action<? super U> action) {
3871 if (transformer == null || action == null)
3872 throw new NullPointerException();
3873 new ForEachTransformedValueTask<K,V,U>
3874 (null, batchFor(parallelismThreshold), 0, 0, table,
3875 transformer, action).invoke();
3876 }
3877
3878 /**
3879 * Returns a non-null result from applying the given search
3880 * function on each value, or null if none. Upon success,
3881 * further element processing is suppressed and the results of
3882 * any other parallel invocations of the search function are
3883 * ignored.
3884 *
3885 * @param parallelismThreshold the (estimated) number of elements
3886 * needed for this operation to be executed in parallel
3887 * @param searchFunction a function returning a non-null
3888 * result on success, else null
3889 * @return a non-null result from applying the given search
3890 * function on each value, or null if none
3891 * @since 1.8
3892 */
3893 public <U> U searchValues(long parallelismThreshold,
3894 Fun<? super V, ? extends U> searchFunction) {
3895 if (searchFunction == null) throw new NullPointerException();
3896 return new SearchValuesTask<K,V,U>
3897 (null, batchFor(parallelismThreshold), 0, 0, table,
3898 searchFunction, new AtomicReference<U>()).invoke();
3899 }
3900
3901 /**
3902 * Returns the result of accumulating all values using the
3903 * given reducer to combine values, or null if none.
3904 *
3905 * @param parallelismThreshold the (estimated) number of elements
3906 * needed for this operation to be executed in parallel
3907 * @param reducer a commutative associative combining function
3908 * @return the result of accumulating all values
3909 * @since 1.8
3910 */
3911 public V reduceValues(long parallelismThreshold,
3912 BiFun<? super V, ? super V, ? extends V> reducer) {
3913 if (reducer == null) throw new NullPointerException();
3914 return new ReduceValuesTask<K,V>
3915 (null, batchFor(parallelismThreshold), 0, 0, table,
3916 null, reducer).invoke();
3917 }
3918
3919 /**
3920 * Returns the result of accumulating the given transformation
3921 * of all values using the given reducer to combine values, or
3922 * null if none.
3923 *
3924 * @param parallelismThreshold the (estimated) number of elements
3925 * needed for this operation to be executed in parallel
3926 * @param transformer a function returning the transformation
3927 * for an element, or null if there is no transformation (in
3928 * which case it is not combined)
3929 * @param reducer a commutative associative combining function
3930 * @return the result of accumulating the given transformation
3931 * of all values
3932 * @since 1.8
3933 */
3934 public <U> U reduceValues(long parallelismThreshold,
3935 Fun<? super V, ? extends U> transformer,
3936 BiFun<? super U, ? super U, ? extends U> reducer) {
3937 if (transformer == null || reducer == null)
3938 throw new NullPointerException();
3939 return new MapReduceValuesTask<K,V,U>
3940 (null, batchFor(parallelismThreshold), 0, 0, table,
3941 null, transformer, reducer).invoke();
3942 }
3943
3944 /**
3945 * Returns the result of accumulating the given transformation
3946 * of all values using the given reducer to combine values,
3947 * and the given basis as an identity value.
3948 *
3949 * @param parallelismThreshold the (estimated) number of elements
3950 * needed for this operation to be executed in parallel
3951 * @param transformer a function returning the transformation
3952 * for an element
3953 * @param basis the identity (initial default value) for the reduction
3954 * @param reducer a commutative associative combining function
3955 * @return the result of accumulating the given transformation
3956 * of all values
3957 * @since 1.8
3958 */
3959 public double reduceValuesToDouble(long parallelismThreshold,
3960 ObjectToDouble<? super V> transformer,
3961 double basis,
3962 DoubleByDoubleToDouble reducer) {
3963 if (transformer == null || reducer == null)
3964 throw new NullPointerException();
3965 return new MapReduceValuesToDoubleTask<K,V>
3966 (null, batchFor(parallelismThreshold), 0, 0, table,
3967 null, transformer, basis, reducer).invoke();
3968 }
3969
3970 /**
3971 * Returns the result of accumulating the given transformation
3972 * of all values using the given reducer to combine values,
3973 * and the given basis as an identity value.
3974 *
3975 * @param parallelismThreshold the (estimated) number of elements
3976 * needed for this operation to be executed in parallel
3977 * @param transformer a function returning the transformation
3978 * for an element
3979 * @param basis the identity (initial default value) for the reduction
3980 * @param reducer a commutative associative combining function
3981 * @return the result of accumulating the given transformation
3982 * of all values
3983 * @since 1.8
3984 */
3985 public long reduceValuesToLong(long parallelismThreshold,
3986 ObjectToLong<? super V> transformer,
3987 long basis,
3988 LongByLongToLong reducer) {
3989 if (transformer == null || reducer == null)
3990 throw new NullPointerException();
3991 return new MapReduceValuesToLongTask<K,V>
3992 (null, batchFor(parallelismThreshold), 0, 0, table,
3993 null, transformer, basis, reducer).invoke();
3994 }
3995
3996 /**
3997 * Returns the result of accumulating the given transformation
3998 * of all values using the given reducer to combine values,
3999 * and the given basis as an identity value.
4000 *
4001 * @param parallelismThreshold the (estimated) number of elements
4002 * needed for this operation to be executed in parallel
4003 * @param transformer a function returning the transformation
4004 * for an element
4005 * @param basis the identity (initial default value) for the reduction
4006 * @param reducer a commutative associative combining function
4007 * @return the result of accumulating the given transformation
4008 * of all values
4009 * @since 1.8
4010 */
4011 public int reduceValuesToInt(long parallelismThreshold,
4012 ObjectToInt<? super V> transformer,
4013 int basis,
4014 IntByIntToInt reducer) {
4015 if (transformer == null || reducer == null)
4016 throw new NullPointerException();
4017 return new MapReduceValuesToIntTask<K,V>
4018 (null, batchFor(parallelismThreshold), 0, 0, table,
4019 null, transformer, basis, reducer).invoke();
4020 }
4021
4022 /**
4023 * Performs the given action for each entry.
4024 *
4025 * @param parallelismThreshold the (estimated) number of elements
4026 * needed for this operation to be executed in parallel
4027 * @param action the action
4028 * @since 1.8
4029 */
4030 public void forEachEntry(long parallelismThreshold,
4031 Action<? super Map.Entry<K,V>> action) {
4032 if (action == null) throw new NullPointerException();
4033 new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
4034 action).invoke();
4035 }
4036
4037 /**
4038 * Performs the given action for each non-null transformation
4039 * of each entry.
4040 *
4041 * @param parallelismThreshold the (estimated) number of elements
4042 * needed for this operation to be executed in parallel
4043 * @param transformer a function returning the transformation
4044 * for an element, or null if there is no transformation (in
4045 * which case the action is not applied)
4046 * @param action the action
4047 * @since 1.8
4048 */
4049 public <U> void forEachEntry(long parallelismThreshold,
4050 Fun<Map.Entry<K,V>, ? extends U> transformer,
4051 Action<? super U> action) {
4052 if (transformer == null || action == null)
4053 throw new NullPointerException();
4054 new ForEachTransformedEntryTask<K,V,U>
4055 (null, batchFor(parallelismThreshold), 0, 0, table,
4056 transformer, action).invoke();
4057 }
4058
4059 /**
4060 * Returns a non-null result from applying the given search
4061 * function on each entry, or null if none. Upon success,
4062 * further element processing is suppressed and the results of
4063 * any other parallel invocations of the search function are
4064 * ignored.
4065 *
4066 * @param parallelismThreshold the (estimated) number of elements
4067 * needed for this operation to be executed in parallel
4068 * @param searchFunction a function returning a non-null
4069 * result on success, else null
4070 * @return a non-null result from applying the given search
4071 * function on each entry, or null if none
4072 * @since 1.8
4073 */
4074 public <U> U searchEntries(long parallelismThreshold,
4075 Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
4076 if (searchFunction == null) throw new NullPointerException();
4077 return new SearchEntriesTask<K,V,U>
4078 (null, batchFor(parallelismThreshold), 0, 0, table,
4079 searchFunction, new AtomicReference<U>()).invoke();
4080 }
4081
4082 /**
4083 * Returns the result of accumulating all entries using the
4084 * given reducer to combine values, or null if none.
4085 *
4086 * @param parallelismThreshold the (estimated) number of elements
4087 * needed for this operation to be executed in parallel
4088 * @param reducer a commutative associative combining function
4089 * @return the result of accumulating all entries
4090 * @since 1.8
4091 */
4092 public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4093 BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4094 if (reducer == null) throw new NullPointerException();
4095 return new ReduceEntriesTask<K,V>
4096 (null, batchFor(parallelismThreshold), 0, 0, table,
4097 null, reducer).invoke();
4098 }
4099
4100 /**
4101 * Returns the result of accumulating the given transformation
4102 * of all entries using the given reducer to combine values,
4103 * or null if none.
4104 *
4105 * @param parallelismThreshold the (estimated) number of elements
4106 * needed for this operation to be executed in parallel
4107 * @param transformer a function returning the transformation
4108 * for an element, or null if there is no transformation (in
4109 * which case it is not combined)
4110 * @param reducer a commutative associative combining function
4111 * @return the result of accumulating the given transformation
4112 * of all entries
4113 * @since 1.8
4114 */
4115 public <U> U reduceEntries(long parallelismThreshold,
4116 Fun<Map.Entry<K,V>, ? extends U> transformer,
4117 BiFun<? super U, ? super U, ? extends U> reducer) {
4118 if (transformer == null || reducer == null)
4119 throw new NullPointerException();
4120 return new MapReduceEntriesTask<K,V,U>
4121 (null, batchFor(parallelismThreshold), 0, 0, table,
4122 null, transformer, reducer).invoke();
4123 }
4124
4125 /**
4126 * Returns the result of accumulating the given transformation
4127 * of all entries using the given reducer to combine values,
4128 * and the given basis as an identity value.
4129 *
4130 * @param parallelismThreshold the (estimated) number of elements
4131 * needed for this operation to be executed in parallel
4132 * @param transformer a function returning the transformation
4133 * for an element
4134 * @param basis the identity (initial default value) for the reduction
4135 * @param reducer a commutative associative combining function
4136 * @return the result of accumulating the given transformation
4137 * of all entries
4138 * @since 1.8
4139 */
4140 public double reduceEntriesToDouble(long parallelismThreshold,
4141 ObjectToDouble<Map.Entry<K,V>> transformer,
4142 double basis,
4143 DoubleByDoubleToDouble reducer) {
4144 if (transformer == null || reducer == null)
4145 throw new NullPointerException();
4146 return new MapReduceEntriesToDoubleTask<K,V>
4147 (null, batchFor(parallelismThreshold), 0, 0, table,
4148 null, transformer, basis, reducer).invoke();
4149 }
4150
4151 /**
4152 * Returns the result of accumulating the given transformation
4153 * of all entries using the given reducer to combine values,
4154 * and the given basis as an identity value.
4155 *
4156 * @param parallelismThreshold the (estimated) number of elements
4157 * needed for this operation to be executed in parallel
4158 * @param transformer a function returning the transformation
4159 * for an element
4160 * @param basis the identity (initial default value) for the reduction
4161 * @param reducer a commutative associative combining function
4162 * @return the result of accumulating the given transformation
4163 * of all entries
4164 * @since 1.8
4165 */
4166 public long reduceEntriesToLong(long parallelismThreshold,
4167 ObjectToLong<Map.Entry<K,V>> transformer,
4168 long basis,
4169 LongByLongToLong reducer) {
4170 if (transformer == null || reducer == null)
4171 throw new NullPointerException();
4172 return new MapReduceEntriesToLongTask<K,V>
4173 (null, batchFor(parallelismThreshold), 0, 0, table,
4174 null, transformer, basis, reducer).invoke();
4175 }
4176
4177 /**
4178 * Returns the result of accumulating the given transformation
4179 * of all entries using the given reducer to combine values,
4180 * and the given basis as an identity value.
4181 *
4182 * @param parallelismThreshold the (estimated) number of elements
4183 * needed for this operation to be executed in parallel
4184 * @param transformer a function returning the transformation
4185 * for an element
4186 * @param basis the identity (initial default value) for the reduction
4187 * @param reducer a commutative associative combining function
4188 * @return the result of accumulating the given transformation
4189 * of all entries
4190 * @since 1.8
4191 */
4192 public int reduceEntriesToInt(long parallelismThreshold,
4193 ObjectToInt<Map.Entry<K,V>> transformer,
4194 int basis,
4195 IntByIntToInt reducer) {
4196 if (transformer == null || reducer == null)
4197 throw new NullPointerException();
4198 return new MapReduceEntriesToIntTask<K,V>
4199 (null, batchFor(parallelismThreshold), 0, 0, table,
4200 null, transformer, basis, reducer).invoke();
4201 }
4202
4203
4204 /* ----------------Views -------------- */
4205
4206 /**
4207 * Base class for views.
4208 */
    abstract static class CollectionView<K,V,E>
        implements Collection<E>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        final ConcurrentHashMapV8<K,V> map; // backing map; every operation delegates to it
        CollectionView(ConcurrentHashMapV8<K,V> map) { this.map = map; }

        /**
         * Returns the map backing this view.
         *
         * @return the map backing this view
         */
        public ConcurrentHashMapV8<K,V> getMap() { return map; }

        /**
         * Removes all of the elements from this view, by removing all
         * the mappings from the map backing this view.
         */
        public final void clear() { map.clear(); }
        public final int size() { return map.size(); }
        public final boolean isEmpty() { return map.isEmpty(); }

        // implementations below rely on concrete classes supplying these
        // abstract methods
        /**
         * Returns a "weakly consistent" iterator that will never
         * throw {@link ConcurrentModificationException}, and
         * guarantees to traverse elements as they existed upon
         * construction of the iterator, and may (but is not
         * guaranteed to) reflect any modifications subsequent to
         * construction.
         */
        public abstract Iterator<E> iterator();
        public abstract boolean contains(Object o);
        public abstract boolean remove(Object o);

        private static final String oomeMsg = "Required array size too large";

        /**
         * Copies the view's elements into a new Object array.
         * Sized from mappingCount() up front, but tolerates concurrent
         * growth during traversal by expanding roughly 1.5x whenever the
         * iterator outruns the array.
         */
        public final Object[] toArray() {
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(oomeMsg);
            int n = (int)sz;
            Object[] r = new Object[n];
            int i = 0;
            for (E e : this) {
                if (i == n) { // array full but iteration not done: grow
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE; // clamp so n + (n>>>1) + 1 cannot overflow
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = e;
            }
            // Trim if the map shrank while we were traversing.
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        /**
         * Copies the view's elements into the given array if it fits,
         * otherwise into a freshly allocated array of the same runtime
         * type. Uses the same grow-while-iterating strategy as toArray().
         */
        @SuppressWarnings("unchecked")
        public final <T> T[] toArray(T[] a) {
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(oomeMsg);
            int m = (int)sz;
            T[] r = (a.length >= m) ? a :
                (T[])java.lang.reflect.Array
                .newInstance(a.getClass().getComponentType(), m);
            int n = r.length;
            int i = 0;
            for (E e : this) {
                if (i == n) { // array full but iteration not done: grow
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE; // clamp so n + (n>>>1) + 1 cannot overflow
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = (T)e;
            }
            if (a == r && i < n) {
                r[i] = null; // null-terminate, per Collection.toArray(T[]) contract
                return r;
            }
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        /**
         * Returns a string representation of this collection.
         * The string representation consists of the string representations
         * of the collection's elements in the order they are returned by
         * its iterator, enclosed in square brackets ({@code "[]"}).
         * Adjacent elements are separated by the characters {@code ", "}
         * (comma and space). Elements are converted to strings as by
         * {@link String#valueOf(Object)}.
         *
         * @return a string representation of this collection
         */
        public final String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append('[');
            Iterator<E> it = iterator();
            if (it.hasNext()) {
                for (;;) {
                    Object e = it.next();
                    // Guard against self-containing collections.
                    sb.append(e == this ? "(this Collection)" : e);
                    if (!it.hasNext())
                        break;
                    sb.append(',').append(' ');
                }
            }
            return sb.append(']').toString();
        }

        public final boolean containsAll(Collection<?> c) {
            if (c != this) {
                for (Object e : c) {
                    // null can never be an element of a map view, so answer
                    // false rather than let contains(null) throw.
                    if (e == null || !contains(e))
                        return false;
                }
            }
            return true;
        }

        public final boolean removeAll(Collection<?> c) {
            boolean modified = false;
            // Iterator.remove keeps removal weakly consistent with traversal.
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

        public final boolean retainAll(Collection<?> c) {
            boolean modified = false;
            // Iterator.remove keeps removal weakly consistent with traversal.
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (!c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

    }
4358
4359 /**
4360 * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in
4361 * which additions may optionally be enabled by mapping to a
4362 * common value. This class cannot be directly instantiated.
4363 * See {@link #keySet() keySet()},
4364 * {@link #keySet(Object) keySet(V)},
4365 * {@link #newKeySet() newKeySet()},
4366 * {@link #newKeySet(int) newKeySet(int)}.
4367 *
4368 * @since 1.8
4369 */
    public static class KeySetView<K,V> extends CollectionView<K,V,K>
        implements Set<K>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        private final V value; // default value for add(); null means additions disabled
        KeySetView(ConcurrentHashMapV8<K,V> map, V value) { // non-public
            super(map);
            this.value = value;
        }

        /**
         * Returns the default mapped value for additions,
         * or {@code null} if additions are not supported.
         *
         * @return the default mapped value for additions, or {@code null}
         * if not supported
         */
        public V getMappedValue() { return value; }

        /**
         * {@inheritDoc}
         * @throws NullPointerException if the specified key is null
         */
        public boolean contains(Object o) { return map.containsKey(o); }

        /**
         * Removes the key from this map view, by removing the key (and its
         * corresponding value) from the backing map. This method does
         * nothing if the key is not in the map.
         *
         * @param o the key to be removed from the backing map
         * @return {@code true} if the backing map contained the specified key
         * @throws NullPointerException if the specified key is null
         */
        public boolean remove(Object o) { return map.remove(o) != null; }

        /**
         * @return an iterator over the keys of the backing map
         */
        public Iterator<K> iterator() {
            Node<K,V>[] t;
            ConcurrentHashMapV8<K,V> m = map;
            // Read the table field once into a local; its length bounds traversal.
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeyIterator<K,V>(t, f, 0, f, m);
        }

        /**
         * Adds the specified key to this set view by mapping the key to
         * the default mapped value in the backing map, if defined.
         *
         * @param e key to be added
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the specified key is null
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean add(K e) {
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            // putVal with onlyIfAbsent=true: returns null iff the key was absent.
            return map.putVal(e, v, true) == null;
        }

        /**
         * Adds all of the elements in the specified collection to this set,
         * as if by calling {@link #add} on each one.
         *
         * @param c the elements to be inserted into this set
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the collection or any of its
         * elements are {@code null}
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean addAll(Collection<? extends K> c) {
            boolean added = false;
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            for (K e : c) {
                if (map.putVal(e, v, true) == null)
                    added = true;
            }
            return added;
        }

        // Sum of element hash codes, per the Set.hashCode contract.
        public int hashCode() {
            int h = 0;
            for (K e : this)
                h += e.hashCode();
            return h;
        }

        // Set equality: same elements in both directions, per Set.equals.
        public boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public ConcurrentHashMapSpliterator<K> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMapV8<K,V> m = map;
            long n = m.sumCount();
            // Read the table field once into a local; its length bounds traversal.
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Action<? super K> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.apply(p.key);
            }
        }
    }
4487
4488 /**
4489 * A view of a ConcurrentHashMapV8 as a {@link Collection} of
4490 * values, in which additions are disabled. This class cannot be
4491 * directly instantiated. See {@link #values()}.
4492 */
    static final class ValuesView<K,V> extends CollectionView<K,V,V>
        implements Collection<V>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        ValuesView(ConcurrentHashMapV8<K,V> map) { super(map); }
        public final boolean contains(Object o) {
            return map.containsValue(o);
        }

        /**
         * Removes one occurrence of the given value (its whole mapping)
         * via the iterator, so removal is weakly consistent with traversal.
         */
        public final boolean remove(Object o) {
            if (o != null) { // null is never a value in this map
                for (Iterator<V> it = iterator(); it.hasNext();) {
                    if (o.equals(it.next())) {
                        it.remove();
                        return true;
                    }
                }
            }
            return false;
        }

        public final Iterator<V> iterator() {
            ConcurrentHashMapV8<K,V> m = map;
            Node<K,V>[] t;
            // Read the table field once into a local; its length bounds traversal.
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueIterator<K,V>(t, f, 0, f, m);
        }

        // A values view cannot support additions: there is no key to map to.
        public final boolean add(V e) {
            throw new UnsupportedOperationException();
        }
        public final boolean addAll(Collection<? extends V> c) {
            throw new UnsupportedOperationException();
        }

        public ConcurrentHashMapSpliterator<V> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMapV8<K,V> m = map;
            long n = m.sumCount();
            // Read the table field once into a local; its length bounds traversal.
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Action<? super V> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.apply(p.val);
            }
        }
    }
4545
4546 /**
4547 * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value)
4548 * entries. This class cannot be directly instantiated. See
4549 * {@link #entrySet()}.
4550 */
    static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
        implements Set<Map.Entry<K,V>>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        EntrySetView(ConcurrentHashMapV8<K,V> map) { super(map); }

        /**
         * True iff o is a Map.Entry whose key currently maps to an
         * equal value in the backing map.
         */
        public boolean contains(Object o) {
            Object k, v, r; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (r = map.get(k)) != null &&
                    (v = e.getValue()) != null &&
                    (v == r || v.equals(r)));
        }

        /**
         * Removes the mapping only if the key still maps to the entry's
         * value (atomic two-argument remove on the backing map).
         */
        public boolean remove(Object o) {
            Object k, v; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    map.remove(k, v));
        }

        /**
         * @return an iterator over the entries of the backing map
         */
        public Iterator<Map.Entry<K,V>> iterator() {
            ConcurrentHashMapV8<K,V> m = map;
            Node<K,V>[] t;
            // Read the table field once into a local; its length bounds traversal.
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntryIterator<K,V>(t, f, 0, f, m);
        }

        // putVal with onlyIfAbsent=false: returns null iff the key was absent.
        public boolean add(Entry<K,V> e) {
            return map.putVal(e.getKey(), e.getValue(), false) == null;
        }

        public boolean addAll(Collection<? extends Entry<K,V>> c) {
            boolean added = false;
            for (Entry<K,V> e : c) {
                if (add(e))
                    added = true;
            }
            return added;
        }

        // Sum of entry hash codes, per the Set.hashCode contract.
        public final int hashCode() {
            int h = 0;
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; ) {
                    h += p.hashCode();
                }
            }
            return h;
        }

        // Set equality: same elements in both directions, per Set.equals.
        public final boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public ConcurrentHashMapSpliterator<Map.Entry<K,V>> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMapV8<K,V> m = map;
            long n = m.sumCount();
            // Read the table field once into a local; its length bounds traversal.
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
        }

        public void forEach(Action<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    // Export a MapEntry whose setValue writes through to the map.
                    action.apply(new MapEntry<K,V>(p.key, p.val, map));
            }
        }

    }
4634
4635 // -------------------------------------------------------
4636
    /**
     * Base class for bulk tasks. Repeats some fields and code from
     * class Traverser, because we need to subclass CountedCompleter.
     *
     * <p>Each task traverses table slots [baseIndex, baseLimit);
     * concrete compute() methods narrow these bounds when forking
     * subtasks for the upper half of the remaining range.
     */
    abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
        Node<K,V>[] tab;        // same as Traverser
        Node<K,V> next;         // node most recently returned by advance()
        int index;              // current slot index within tab
        int baseIndex;          // next slot of the initial table to traverse
        int baseLimit;          // first slot (exclusive) past this task's range
        final int baseSize;     // length of the initial (root) table
        int batch;              // split control

        /**
         * @param par parent task, or null for the root task (which then
         *            covers the whole table)
         * @param b   split budget, halved on each fork
         * @param i   first slot (inclusive) of this task's range
         * @param f   first slot (exclusive) past this task's range
         * @param t   table snapshot to traverse
         */
        BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
            super(par);
            this.batch = b;
            this.index = this.baseIndex = i;
            if ((this.tab = t) == null)
                this.baseSize = this.baseLimit = 0;
            else if (par == null)
                this.baseSize = this.baseLimit = t.length;
            else {
                this.baseLimit = f;
                this.baseSize = par.baseSize;
            }
        }

        /**
         * Same as Traverser version.
         * Returns the next node within this task's range, or null when the
         * range is exhausted. When a bin holds a ForwardingNode (table is
         * being resized) the scan switches to the next table; a TreeBin is
         * scanned starting from its first linked node.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;
            for (;;) {
                Node<K,V>[] t; int i, n; K ek;  // must use locals in checks
                if (e != null)
                    return next = e;
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                if ((e = tabAt(t, index)) != null && e.hash < 0) {
                    if (e instanceof ForwardingNode) {
                        // bin moved during resize: continue scan in next table
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first;
                    else
                        e = null;
                }
                if ((index += baseSize) >= n)
                    index = ++baseIndex;    // visit upper slots if present
            }
        }
    }
4694
4695 /*
4696 * Task classes. Coded in a regular but ugly format/style to
4697 * simplify checks that each variant differs in the right way from
4698 * others. The null screenings exist because compilers cannot tell
4699 * that we've already null-checked task arguments, so we force
4700 * simplest hoisted bypass to help avoid convoluted traps.
4701 */
    /** Performs the given action for each key in this task's range. */
    @SuppressWarnings("serial")
    static final class ForEachKeyTask<K,V>
        extends BulkTask<K,V,Void> {
        final Action<? super K> action;
        ForEachKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Action<? super K> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Action<? super K> action;
            if ((action = this.action) != null) {
                // While the batch budget allows, fork the upper half [h, f)
                // as a subtask; this task keeps [i, h) by shrinking baseLimit.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachKeyTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null;)
                    action.apply(p.key);
                propagateCompletion();  // complete this task; notify parents
            }
        }
    }
4728
    /** Performs the given action for each value in this task's range. */
    @SuppressWarnings("serial")
    static final class ForEachValueTask<K,V>
        extends BulkTask<K,V,Void> {
        final Action<? super V> action;
        ForEachValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Action<? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Action<? super V> action;
            if ((action = this.action) != null) {
                // Fork upper half [h, f) while batch permits; keep [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachValueTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null;)
                    action.apply(p.val);
                propagateCompletion();  // complete this task; notify parents
            }
        }
    }
4755
    /** Performs the given action for each entry (node) in this task's range. */
    @SuppressWarnings("serial")
    static final class ForEachEntryTask<K,V>
        extends BulkTask<K,V,Void> {
        final Action<? super Entry<K,V>> action;
        ForEachEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Action<? super Entry<K,V>> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Action<? super Entry<K,V>> action;
            if ((action = this.action) != null) {
                // Fork upper half [h, f) while batch permits; keep [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachEntryTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    action.apply(p);    // nodes are passed directly as entries
                propagateCompletion();  // complete this task; notify parents
            }
        }
    }
4782
    /** Performs the given action for each (key, value) pair in this task's range. */
    @SuppressWarnings("serial")
    static final class ForEachMappingTask<K,V>
        extends BulkTask<K,V,Void> {
        final BiAction<? super K, ? super V> action;
        ForEachMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiAction<? super K,? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final BiAction<? super K, ? super V> action;
            if ((action = this.action) != null) {
                // Fork upper half [h, f) while batch permits; keep [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachMappingTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    action.apply(p.key, p.val);
                propagateCompletion();  // complete this task; notify parents
            }
        }
    }
4809
    /**
     * Applies the action to each non-null transformation of a key in
     * this task's range; keys whose transformation is null are skipped.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedKeyTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Fun<? super K, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super K, ? extends U> transformer, Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Fun<? super K, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork upper half [h, f) while batch permits; keep [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedKeyTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key)) != null)
                        action.apply(u);
                }
                propagateCompletion();  // complete this task; notify parents
            }
        }
    }
4842
    /**
     * Applies the action to each non-null transformation of a value in
     * this task's range; values whose transformation is null are skipped.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedValueTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Fun<? super V, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super V, ? extends U> transformer, Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Fun<? super V, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork upper half [h, f) while batch permits; keep [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedValueTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        action.apply(u);
                }
                propagateCompletion();  // complete this task; notify parents
            }
        }
    }
4875
    /**
     * Applies the action to each non-null transformation of an entry in
     * this task's range; entries whose transformation is null are skipped.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedEntryTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Fun<Map.Entry<K,V>, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<Map.Entry<K,V>, ? extends U> transformer, Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Fun<Map.Entry<K,V>, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork upper half [h, f) while batch permits; keep [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedEntryTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        action.apply(u);
                }
                propagateCompletion();  // complete this task; notify parents
            }
        }
    }
4908
    /**
     * Applies the action to each non-null transformation of a (key, value)
     * pair in this task's range; null transformations are skipped.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedMappingTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final BiFun<? super K, ? super V, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFun<? super K, ? super V, ? extends U> transformer,
             Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final BiFun<? super K, ? super V, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork upper half [h, f) while batch permits; keep [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedMappingTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        action.apply(u);
                }
                propagateCompletion();  // complete this task; notify parents
            }
        }
    }
4942
    /**
     * Returns the first non-null result of applying the search function
     * to a key; all sibling tasks stop once any task records a result.
     */
    @SuppressWarnings("serial")
    static final class SearchKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super K, ? extends U> searchFunction;
        final AtomicReference<U> result;    // shared across all subtasks
        SearchKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super K, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Fun<? super K, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork upper half [h, f) while batch permits; keep [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)   // another task already found one
                        return;
                    addToPendingCount(1);
                    new SearchKeysTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();  // range exhausted without a match
                        break;
                    }
                    if ((u = searchFunction.apply(p.key)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();  // first winner completes root
                        break;
                    }
                }
            }
        }
    }
4986
    /**
     * Returns the first non-null result of applying the search function
     * to a value; all sibling tasks stop once any task records a result.
     */
    @SuppressWarnings("serial")
    static final class SearchValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;    // shared across all subtasks
        SearchValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Fun<? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork upper half [h, f) while batch permits; keep [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)   // another task already found one
                        return;
                    addToPendingCount(1);
                    new SearchValuesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();  // range exhausted without a match
                        break;
                    }
                    if ((u = searchFunction.apply(p.val)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();  // first winner completes root
                        break;
                    }
                }
            }
        }
    }
5030
    /**
     * Returns the first non-null result of applying the search function
     * to an entry; all sibling tasks stop once any task records a result.
     */
    @SuppressWarnings("serial")
    static final class SearchEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result;    // shared across all subtasks
        SearchEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<Entry<K,V>, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Fun<Entry<K,V>, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork upper half [h, f) while batch permits; keep [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)   // another task already found one
                        return;
                    addToPendingCount(1);
                    new SearchEntriesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();  // range exhausted without a match
                        break;
                    }
                    if ((u = searchFunction.apply(p)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        return;     // root completed (or lost race); done
                    }
                }
            }
        }
    }
5074
    /**
     * Returns the first non-null result of applying the search function
     * to a (key, value) pair; siblings stop once any task records a result.
     */
    @SuppressWarnings("serial")
    static final class SearchMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFun<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;    // shared across all subtasks
        SearchMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFun<? super K, ? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final BiFun<? super K, ? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork upper half [h, f) while batch permits; keep [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)   // another task already found one
                        return;
                    addToPendingCount(1);
                    new SearchMappingsTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();  // range exhausted without a match
                        break;
                    }
                    if ((u = searchFunction.apply(p.key, p.val)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();  // first winner completes root
                        break;
                    }
                }
            }
        }
    }
5118
    /**
     * Reduces all keys in this task's range with the given reducer;
     * result is null if the range is empty.
     */
    @SuppressWarnings("serial")
    static final class ReduceKeysTask<K,V>
        extends BulkTask<K,V,K> {
        final BiFun<? super K, ? super K, ? extends K> reducer;
        K result;
        ReduceKeysTask<K,V> rights, nextRight;  // chain of forked right-hand subtasks
        ReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceKeysTask<K,V> nextRight,
             BiFun<? super K, ? super K, ? extends K> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final K getRawResult() { return result; }
        public final void compute() {
            final BiFun<? super K, ? super K, ? extends K> reducer;
            if ((reducer = this.reducer) != null) {
                // Fork upper half [h, f) while batch permits; keep [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceKeysTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                K r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    K u = p.key;
                    // null operands pass through without invoking the reducer
                    r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
                }
                result = r;
                // Walk the completion chain, folding each completed right
                // subtask's result into its parent's result.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") ReduceKeysTask<K,V>
                        t = (ReduceKeysTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        K tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5165
    /**
     * Reduces all values in this task's range with the given reducer;
     * result is null if the range is empty.
     */
    @SuppressWarnings("serial")
    static final class ReduceValuesTask<K,V>
        extends BulkTask<K,V,V> {
        final BiFun<? super V, ? super V, ? extends V> reducer;
        V result;
        ReduceValuesTask<K,V> rights, nextRight;    // chain of forked right-hand subtasks
        ReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceValuesTask<K,V> nextRight,
             BiFun<? super V, ? super V, ? extends V> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final V getRawResult() { return result; }
        public final void compute() {
            final BiFun<? super V, ? super V, ? extends V> reducer;
            if ((reducer = this.reducer) != null) {
                // Fork upper half [h, f) while batch permits; keep [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceValuesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                V r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    V v = p.val;
                    r = (r == null) ? v : reducer.apply(r, v);
                }
                result = r;
                // Fold completed right subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") ReduceValuesTask<K,V>
                        t = (ReduceValuesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        V tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5212
5213 @SuppressWarnings("serial")
5214 static final class ReduceEntriesTask<K,V>
5215 extends BulkTask<K,V,Map.Entry<K,V>> {
5216 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5217 Map.Entry<K,V> result;
5218 ReduceEntriesTask<K,V> rights, nextRight;
5219 ReduceEntriesTask
5220 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5221 ReduceEntriesTask<K,V> nextRight,
5222 BiFun<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5223 super(p, b, i, f, t); this.nextRight = nextRight;
5224 this.reducer = reducer;
5225 }
5226 public final Map.Entry<K,V> getRawResult() { return result; }
5227 public final void compute() {
5228 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5229 if ((reducer = this.reducer) != null) {
5230 for (int i = baseIndex, f, h; batch > 0 &&
5231 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5232 addToPendingCount(1);
5233 (rights = new ReduceEntriesTask<K,V>
5234 (this, batch >>>= 1, baseLimit = h, f, tab,
5235 rights, reducer)).fork();
5236 }
5237 Map.Entry<K,V> r = null;
5238 for (Node<K,V> p; (p = advance()) != null; )
5239 r = (r == null) ? p : reducer.apply(r, p);
5240 result = r;
5241 CountedCompleter<?> c;
5242 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5243 @SuppressWarnings("unchecked") ReduceEntriesTask<K,V>
5244 t = (ReduceEntriesTask<K,V>)c,
5245 s = t.rights;
5246 while (s != null) {
5247 Map.Entry<K,V> tr, sr;
5248 if ((sr = s.result) != null)
5249 t.result = (((tr = t.result) == null) ? sr :
5250 reducer.apply(tr, sr));
5251 s = t.rights = s.nextRight;
5252 }
5253 }
5254 }
5255 }
5256 }
5257
    /**
     * Transforms each key in this task's range and reduces the non-null
     * transformed values; result is null if no transformation is non-null.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super K, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceKeysTask<K,V,U> rights, nextRight; // chain of forked right-hand subtasks
        MapReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysTask<K,V,U> nextRight,
             Fun<? super K, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Fun<? super K, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Fork upper half [h, f) while batch permits; keep [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key)) != null)  // skip nulls
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Fold completed right subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysTask<K,V,U>
                        t = (MapReduceKeysTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5310
    /**
     * Transforms each value in this task's range and reduces the non-null
     * transformed values; result is null if no transformation is non-null.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super V, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceValuesTask<K,V,U> rights, nextRight;   // chain of forked right-hand subtasks
        MapReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesTask<K,V,U> nextRight,
             Fun<? super V, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Fun<? super V, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Fork upper half [h, f) while batch permits; keep [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)  // skip nulls
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Fold completed right subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesTask<K,V,U>
                        t = (MapReduceValuesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5363
    /**
     * Transforms each entry in this task's range and reduces the non-null
     * transformed values; result is null if no transformation is non-null.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<Map.Entry<K,V>, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceEntriesTask<K,V,U> rights, nextRight;  // chain of forked right-hand subtasks
        MapReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesTask<K,V,U> nextRight,
             Fun<Map.Entry<K,V>, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Fun<Map.Entry<K,V>, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Fork upper half [h, f) while batch permits; keep [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)  // skip nulls
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Fold completed right subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesTask<K,V,U>
                        t = (MapReduceEntriesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5416
    /**
     * Transforms each (key, value) pair in this task's range and reduces
     * the non-null transformed values; result is null if none are non-null.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFun<? super K, ? super V, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceMappingsTask<K,V,U> rights, nextRight; // chain of forked right-hand subtasks
        MapReduceMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsTask<K,V,U> nextRight,
             BiFun<? super K, ? super V, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final BiFun<? super K, ? super V, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Fork upper half [h, f) while batch permits; keep [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)  // skip nulls
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Fold completed right subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsTask<K,V,U>
                        t = (MapReduceMappingsTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5469
    /**
     * Maps each key in this task's range to a double and reduces with the
     * given reducer, starting from the given basis (identity) value.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<? super K> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;     // identity element for the reduction
        double result;
        MapReduceKeysToDoubleTask<K,V> rights, nextRight;   // chain of forked right-hand subtasks
        MapReduceKeysToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToDoubleTask<K,V> nextRight,
             ObjectToDouble<? super K> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectToDouble<? super K> transformer;
            final DoubleByDoubleToDouble reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Fork upper half [h, f) while batch permits; keep [i, h).
                // Each subtask is seeded with the same basis value r.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key));
                result = r;
                // Fold completed right subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysToDoubleTask<K,V>
                        t = (MapReduceKeysToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5518
    /**
     * Maps each value in this task's range to a double and reduces with the
     * given reducer, starting from the given basis (identity) value.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<? super V> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;     // identity element for the reduction
        double result;
        MapReduceValuesToDoubleTask<K,V> rights, nextRight; // chain of forked right-hand subtasks
        MapReduceValuesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToDoubleTask<K,V> nextRight,
             ObjectToDouble<? super V> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectToDouble<? super V> transformer;
            final DoubleByDoubleToDouble reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Fork upper half [h, f) while batch permits; keep [i, h).
                // Each subtask is seeded with the same basis value r.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.val));
                result = r;
                // Fold completed right subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesToDoubleTask<K,V>
                        t = (MapReduceValuesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5567
    /**
     * ForkJoin task mapping each entry to a double with {@code transformer}
     * and combining results with {@code reducer}, starting from {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<Map.Entry<K,V>> transformer; // entry -> double
        final DoubleByDoubleToDouble reducer;             // combines two partial results
        final double basis;                               // identity element for the reduction
        double result;                                    // reduction over this subtree
        MapReduceEntriesToDoubleTask<K,V> rights, nextRight; // chain of forked right subtasks
        MapReduceEntriesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToDoubleTask<K,V> nextRight,
             ObjectToDouble<Map.Entry<K,V>> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectToDouble<Map.Entry<K,V>> transformer;
            final DoubleByDoubleToDouble reducer;
            // Defensive guard: no-op if either function is absent.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Repeatedly halve the remaining index range, forking the upper
                // half as a new subtask, while the batch budget allows splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce this task's own (leftmost) portion.
                // Node implements Map.Entry, so nodes are passed directly.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p));
                result = r;
                // On completion, fold each finished right subtask's result into
                // its parent's, unlinking the rights chain as we go (helps GC).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesToDoubleTask<K,V>
                        t = (MapReduceEntriesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5616
    /**
     * ForkJoin task mapping each (key, value) pair to a double with
     * {@code transformer} and combining results with {@code reducer},
     * starting from {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectByObjectToDouble<? super K, ? super V> transformer; // (key, value) -> double
        final DoubleByDoubleToDouble reducer;  // combines two partial results
        final double basis;                    // identity element for the reduction
        double result;                         // reduction over this subtree
        MapReduceMappingsToDoubleTask<K,V> rights, nextRight; // chain of forked right subtasks
        MapReduceMappingsToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToDoubleTask<K,V> nextRight,
             ObjectByObjectToDouble<? super K, ? super V> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectByObjectToDouble<? super K, ? super V> transformer;
            final DoubleByDoubleToDouble reducer;
            // Defensive guard: no-op if either function is absent.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Repeatedly halve the remaining index range, forking the upper
                // half as a new subtask, while the batch budget allows splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce this task's own (leftmost) portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key, p.val));
                result = r;
                // On completion, fold each finished right subtask's result into
                // its parent's, unlinking the rights chain as we go (helps GC).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsToDoubleTask<K,V>
                        t = (MapReduceMappingsToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5665
    /**
     * ForkJoin task mapping each key to a long with {@code transformer}
     * and combining results with {@code reducer}, starting from {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<? super K> transformer; // key -> long
        final LongByLongToLong reducer;            // combines two partial results
        final long basis;                          // identity element for the reduction
        long result;                               // reduction over this subtree
        MapReduceKeysToLongTask<K,V> rights, nextRight; // chain of forked right subtasks
        MapReduceKeysToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToLongTask<K,V> nextRight,
             ObjectToLong<? super K> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectToLong<? super K> transformer;
            final LongByLongToLong reducer;
            // Defensive guard: no-op if either function is absent.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Repeatedly halve the remaining index range, forking the upper
                // half as a new subtask, while the batch budget allows splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce this task's own (leftmost) portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key));
                result = r;
                // On completion, fold each finished right subtask's result into
                // its parent's, unlinking the rights chain as we go (helps GC).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysToLongTask<K,V>
                        t = (MapReduceKeysToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5714
    /**
     * ForkJoin task mapping each value to a long with {@code transformer}
     * and combining results with {@code reducer}, starting from {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<? super V> transformer; // value -> long
        final LongByLongToLong reducer;            // combines two partial results
        final long basis;                          // identity element for the reduction
        long result;                               // reduction over this subtree
        MapReduceValuesToLongTask<K,V> rights, nextRight; // chain of forked right subtasks
        MapReduceValuesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToLongTask<K,V> nextRight,
             ObjectToLong<? super V> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectToLong<? super V> transformer;
            final LongByLongToLong reducer;
            // Defensive guard: no-op if either function is absent.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Repeatedly halve the remaining index range, forking the upper
                // half as a new subtask, while the batch budget allows splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce this task's own (leftmost) portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.val));
                result = r;
                // On completion, fold each finished right subtask's result into
                // its parent's, unlinking the rights chain as we go (helps GC).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesToLongTask<K,V>
                        t = (MapReduceValuesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5763
    /**
     * ForkJoin task mapping each entry to a long with {@code transformer}
     * and combining results with {@code reducer}, starting from {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<Map.Entry<K,V>> transformer; // entry -> long
        final LongByLongToLong reducer;                 // combines two partial results
        final long basis;                               // identity element for the reduction
        long result;                                    // reduction over this subtree
        MapReduceEntriesToLongTask<K,V> rights, nextRight; // chain of forked right subtasks
        MapReduceEntriesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToLongTask<K,V> nextRight,
             ObjectToLong<Map.Entry<K,V>> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectToLong<Map.Entry<K,V>> transformer;
            final LongByLongToLong reducer;
            // Defensive guard: no-op if either function is absent.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Repeatedly halve the remaining index range, forking the upper
                // half as a new subtask, while the batch budget allows splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce this task's own (leftmost) portion.
                // Node implements Map.Entry, so nodes are passed directly.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p));
                result = r;
                // On completion, fold each finished right subtask's result into
                // its parent's, unlinking the rights chain as we go (helps GC).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesToLongTask<K,V>
                        t = (MapReduceEntriesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5812
    /**
     * ForkJoin task mapping each (key, value) pair to a long with
     * {@code transformer} and combining results with {@code reducer},
     * starting from {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectByObjectToLong<? super K, ? super V> transformer; // (key, value) -> long
        final LongByLongToLong reducer;  // combines two partial results
        final long basis;                // identity element for the reduction
        long result;                     // reduction over this subtree
        MapReduceMappingsToLongTask<K,V> rights, nextRight; // chain of forked right subtasks
        MapReduceMappingsToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToLongTask<K,V> nextRight,
             ObjectByObjectToLong<? super K, ? super V> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectByObjectToLong<? super K, ? super V> transformer;
            final LongByLongToLong reducer;
            // Defensive guard: no-op if either function is absent.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Repeatedly halve the remaining index range, forking the upper
                // half as a new subtask, while the batch budget allows splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce this task's own (leftmost) portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key, p.val));
                result = r;
                // On completion, fold each finished right subtask's result into
                // its parent's, unlinking the rights chain as we go (helps GC).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsToLongTask<K,V>
                        t = (MapReduceMappingsToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5861
    /**
     * ForkJoin task mapping each key to an int with {@code transformer}
     * and combining results with {@code reducer}, starting from {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<? super K> transformer; // key -> int
        final IntByIntToInt reducer;              // combines two partial results
        final int basis;                          // identity element for the reduction
        int result;                               // reduction over this subtree
        MapReduceKeysToIntTask<K,V> rights, nextRight; // chain of forked right subtasks
        MapReduceKeysToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToIntTask<K,V> nextRight,
             ObjectToInt<? super K> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectToInt<? super K> transformer;
            final IntByIntToInt reducer;
            // Defensive guard: no-op if either function is absent.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Repeatedly halve the remaining index range, forking the upper
                // half as a new subtask, while the batch budget allows splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce this task's own (leftmost) portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key));
                result = r;
                // On completion, fold each finished right subtask's result into
                // its parent's, unlinking the rights chain as we go (helps GC).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysToIntTask<K,V>
                        t = (MapReduceKeysToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5910
    /**
     * ForkJoin task mapping each value to an int with {@code transformer}
     * and combining results with {@code reducer}, starting from {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<? super V> transformer; // value -> int
        final IntByIntToInt reducer;              // combines two partial results
        final int basis;                          // identity element for the reduction
        int result;                               // reduction over this subtree
        MapReduceValuesToIntTask<K,V> rights, nextRight; // chain of forked right subtasks
        MapReduceValuesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToIntTask<K,V> nextRight,
             ObjectToInt<? super V> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectToInt<? super V> transformer;
            final IntByIntToInt reducer;
            // Defensive guard: no-op if either function is absent.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Repeatedly halve the remaining index range, forking the upper
                // half as a new subtask, while the batch budget allows splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce this task's own (leftmost) portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.val));
                result = r;
                // On completion, fold each finished right subtask's result into
                // its parent's, unlinking the rights chain as we go (helps GC).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesToIntTask<K,V>
                        t = (MapReduceValuesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5959
    /**
     * ForkJoin task mapping each entry to an int with {@code transformer}
     * and combining results with {@code reducer}, starting from {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<Map.Entry<K,V>> transformer; // entry -> int
        final IntByIntToInt reducer;                   // combines two partial results
        final int basis;                               // identity element for the reduction
        int result;                                    // reduction over this subtree
        MapReduceEntriesToIntTask<K,V> rights, nextRight; // chain of forked right subtasks
        MapReduceEntriesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToIntTask<K,V> nextRight,
             ObjectToInt<Map.Entry<K,V>> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectToInt<Map.Entry<K,V>> transformer;
            final IntByIntToInt reducer;
            // Defensive guard: no-op if either function is absent.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Repeatedly halve the remaining index range, forking the upper
                // half as a new subtask, while the batch budget allows splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce this task's own (leftmost) portion.
                // Node implements Map.Entry, so nodes are passed directly.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p));
                result = r;
                // On completion, fold each finished right subtask's result into
                // its parent's, unlinking the rights chain as we go (helps GC).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesToIntTask<K,V>
                        t = (MapReduceEntriesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6008
    /**
     * ForkJoin task mapping each (key, value) pair to an int with
     * {@code transformer} and combining results with {@code reducer},
     * starting from {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectByObjectToInt<? super K, ? super V> transformer; // (key, value) -> int
        final IntByIntToInt reducer;  // combines two partial results
        final int basis;              // identity element for the reduction
        int result;                   // reduction over this subtree
        MapReduceMappingsToIntTask<K,V> rights, nextRight; // chain of forked right subtasks
        MapReduceMappingsToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToIntTask<K,V> nextRight,
             ObjectByObjectToInt<? super K, ? super V> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectByObjectToInt<? super K, ? super V> transformer;
            final IntByIntToInt reducer;
            // Defensive guard: no-op if either function is absent.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Repeatedly halve the remaining index range, forking the upper
                // half as a new subtask, while the batch budget allows splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce this task's own (leftmost) portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key, p.val));
                result = r;
                // On completion, fold each finished right subtask's result into
                // its parent's, unlinking the rights chain as we go (helps GC).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsToIntTask<K,V>
                        t = (MapReduceMappingsToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6057
6058 /* ---------------- Counters -------------- */
6059
6060 // Adapted from LongAdder and Striped64.
6061 // See their internal docs for explanation.
6062
    // A padded cell for distributing counts
    static final class CounterCell {
        // p0..p6 and q0..q6 are padding on either side of value to reduce
        // false sharing between cells updated by different threads
        // (Striped64-style layout).
        volatile long p0, p1, p2, p3, p4, p5, p6;
        volatile long value;  // this cell's contribution to the total count
        volatile long q0, q1, q2, q3, q4, q5, q6;
        CounterCell(long x) { value = x; }
    }
6070
    /**
     * Holder for the thread-local hash code determining which
     * CounterCell to use. The code is initialized via the
     * counterHashCodeGenerator, but may be moved upon collisions
     * (rehashed in fullAddCount).
     */
    static final class CounterHashCode {
        int code;  // this thread's current probe value; 0 means uninitialized
    }
6079
    /**
     * Generates initial value for per-thread CounterHashCodes.
     */
    static final AtomicInteger counterHashCodeGenerator = new AtomicInteger();

    /**
     * Increment for counterHashCodeGenerator. See class ThreadLocal
     * for explanation (golden-ratio-derived constant giving a good
     * spread of successive probe values).
     */
    static final int SEED_INCREMENT = 0x61c88647;

    /**
     * Per-thread counter hash codes. Shared across all instances.
     * Lazily populated by fullAddCount on first contended update.
     */
    static final ThreadLocal<CounterHashCode> threadCounterHashCode =
        new ThreadLocal<CounterHashCode>();
6096
6097
6098 final long sumCount() {
6099 CounterCell[] as = counterCells; CounterCell a;
6100 long sum = baseCount;
6101 if (as != null) {
6102 for (int i = 0; i < as.length; ++i) {
6103 if ((a = as[i]) != null)
6104 sum += a.value;
6105 }
6106 }
6107 return sum;
6108 }
6109
    // See LongAdder version for explanation
    /**
     * Slow path for adding to the striped counter: creates, expands, or
     * retries on the CounterCell table, falling back to baseCount.
     * Adapted from Striped64.longAccumulate.
     *
     * @param x the value to add
     * @param hc the caller thread's hash-code holder, or null if not yet set
     * @param wasUncontended false if a CAS on the thread's cell is already
     *        known to have failed (forces a rehash before retrying)
     */
    private final void fullAddCount(long x, CounterHashCode hc,
                                    boolean wasUncontended) {
        int h;
        if (hc == null) {
            // First contended update by this thread: create and register
            // its probe value, avoiding 0 (reserved as "uninitialized").
            hc = new CounterHashCode();
            int s = counterHashCodeGenerator.addAndGet(SEED_INCREMENT);
            h = hc.code = (s == 0) ? 1 : s; // Avoid zero
            threadCounterHashCode.set(hc);
        }
        else
            h = hc.code;
        boolean collide = false;                // True if last slot nonempty
        for (;;) {
            CounterCell[] as; CounterCell a; int n; long v;
            if ((as = counterCells) != null && (n = as.length) > 0) {
                if ((a = as[(n - 1) & h]) == null) {
                    if (cellsBusy == 0) {            // Try to attach new Cell
                        CounterCell r = new CounterCell(x); // Optimistic create
                        if (cellsBusy == 0 &&
                            U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                            boolean created = false;
                            try {               // Recheck under lock
                                CounterCell[] rs; int m, j;
                                if ((rs = counterCells) != null &&
                                    (m = rs.length) > 0 &&
                                    rs[j = (m - 1) & h] == null) {
                                    rs[j] = r;
                                    created = true;
                                }
                            } finally {
                                cellsBusy = 0;  // release spinlock
                            }
                            if (created)
                                break;
                            continue;           // Slot is now non-empty
                        }
                    }
                    collide = false;
                }
                else if (!wasUncontended)       // CAS already known to fail
                    wasUncontended = true;      // Continue after rehash
                else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
                    break;                      // added to existing cell
                else if (counterCells != as || n >= NCPU)
                    collide = false;            // At max size or stale
                else if (!collide)
                    collide = true;             // remember; expand on next failure
                else if (cellsBusy == 0 &&
                         U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                    try {
                        if (counterCells == as) {// Expand table unless stale
                            CounterCell[] rs = new CounterCell[n << 1];
                            for (int i = 0; i < n; ++i)
                                rs[i] = as[i];
                            counterCells = rs;
                        }
                    } finally {
                        cellsBusy = 0;
                    }
                    collide = false;
                    continue;                   // Retry with expanded table
                }
                // XorShift rehash to find a different cell on next attempt.
                h ^= h << 13;                   // Rehash
                h ^= h >>> 17;
                h ^= h << 5;
            }
            else if (cellsBusy == 0 && counterCells == as &&
                     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                boolean init = false;
                try {                           // Initialize table
                    if (counterCells == as) {
                        CounterCell[] rs = new CounterCell[2];
                        rs[h & 1] = new CounterCell(x);
                        counterCells = rs;
                        init = true;
                    }
                } finally {
                    cellsBusy = 0;
                }
                if (init)
                    break;
            }
            else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
                break;                          // Fall back on using base
        }
        hc.code = h;                            // Record index for next time
    }
6198
    // Unsafe mechanics: raw field/array offsets resolved in the static
    // initializer below, used for CAS and volatile array access.
    private static final sun.misc.Unsafe U;
    private static final long SIZECTL;        // offset of sizeCtl
    private static final long TRANSFERINDEX;  // offset of transferIndex
    private static final long BASECOUNT;      // offset of baseCount
    private static final long CELLSBUSY;      // offset of cellsBusy spinlock
    private static final long CELLVALUE;      // offset of CounterCell.value
    private static final long ABASE;          // base offset of Node[] storage
    private static final int ASHIFT;          // log2 of Node[] element scale
6208
    static {
        try {
            U = getUnsafe();
            Class<?> k = ConcurrentHashMapV8.class;
            // Resolve field offsets once for use with compareAndSwap* calls.
            SIZECTL = U.objectFieldOffset
                (k.getDeclaredField("sizeCtl"));
            TRANSFERINDEX = U.objectFieldOffset
                (k.getDeclaredField("transferIndex"));
            BASECOUNT = U.objectFieldOffset
                (k.getDeclaredField("baseCount"));
            CELLSBUSY = U.objectFieldOffset
                (k.getDeclaredField("cellsBusy"));
            Class<?> ck = CounterCell.class;
            CELLVALUE = U.objectFieldOffset
                (ck.getDeclaredField("value"));
            Class<?> ak = Node[].class;
            ABASE = U.arrayBaseOffset(ak);
            int scale = U.arrayIndexScale(ak);
            // Element addresses are computed as ABASE + (i << ASHIFT), which
            // requires the element scale to be a power of two.
            if ((scale & (scale - 1)) != 0)
                throw new Error("data type scale not a power of two");
            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
        } catch (Exception e) {
            throw new Error(e);
        }
    }
6234
    /**
     * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
     * Replace with a simple call to Unsafe.getUnsafe when integrating
     * into a jdk.
     *
     * @return a sun.misc.Unsafe
     */
    private static sun.misc.Unsafe getUnsafe() {
        // Fast path: works when this class is on the boot classpath.
        try {
            return sun.misc.Unsafe.getUnsafe();
        } catch (SecurityException tryReflectionInstead) {}
        // Fallback: reflectively scan Unsafe's static fields for the
        // singleton instance (works for ordinary classpath deployment).
        try {
            return java.security.AccessController.doPrivileged
            (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
                public sun.misc.Unsafe run() throws Exception {
                    Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
                    for (java.lang.reflect.Field f : k.getDeclaredFields()) {
                        f.setAccessible(true);
                        Object x = f.get(null);
                        if (k.isInstance(x))
                            return k.cast(x);
                    }
                    throw new NoSuchFieldError("the Unsafe");
                }});
        } catch (java.security.PrivilegedActionException e) {
            throw new RuntimeException("Could not initialize intrinsics",
                                       e.getCause());
        }
    }
6264 }