ViewVC Help
View File | Revision Log | Show Annotations | Download File | Root Listing
root/jsr166/jsr166/src/jsr166e/ConcurrentHashMapV8.java
Revision: 1.126
Committed: Mon Mar 7 23:55:31 2016 UTC (8 years, 1 month ago) by jsr166
Branch: MAIN
CVS Tags: HEAD
Changes since 1.125: +1 -1 lines
Log Message:
typo

File Contents

# Content
1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/publicdomain/zero/1.0/
5 */
6
7 package jsr166e;
8
9 import jsr166e.ForkJoinPool;
10
11 import java.io.ObjectStreamField;
12 import java.io.Serializable;
13 import java.lang.reflect.ParameterizedType;
14 import java.lang.reflect.Type;
15 import java.util.AbstractMap;
16 import java.util.Arrays;
17 import java.util.Collection;
18 import java.util.ConcurrentModificationException;
19 import java.util.Enumeration;
20 import java.util.HashMap;
21 import java.util.Hashtable;
22 import java.util.Iterator;
23 import java.util.Map;
24 import java.util.NoSuchElementException;
25 import java.util.Set;
26 import java.util.concurrent.ConcurrentMap;
27 import java.util.concurrent.atomic.AtomicReference;
28 import java.util.concurrent.atomic.AtomicInteger;
29 import java.util.concurrent.locks.LockSupport;
30 import java.util.concurrent.locks.ReentrantLock;
31
32 /**
33 * A hash table supporting full concurrency of retrievals and
34 * high expected concurrency for updates. This class obeys the
35 * same functional specification as {@link java.util.Hashtable}, and
36 * includes versions of methods corresponding to each method of
37 * {@code Hashtable}. However, even though all operations are
38 * thread-safe, retrieval operations do <em>not</em> entail locking,
39 * and there is <em>not</em> any support for locking the entire table
40 * in a way that prevents all access. This class is fully
41 * interoperable with {@code Hashtable} in programs that rely on its
42 * thread safety but not on its synchronization details.
43 *
44 * <p>Retrieval operations (including {@code get}) generally do not
45 * block, so may overlap with update operations (including {@code put}
46 * and {@code remove}). Retrievals reflect the results of the most
47 * recently <em>completed</em> update operations holding upon their
48 * onset. (More formally, an update operation for a given key bears a
49 * <em>happens-before</em> relation with any (non-null) retrieval for
50 * that key reporting the updated value.) For aggregate operations
51 * such as {@code putAll} and {@code clear}, concurrent retrievals may
52 * reflect insertion or removal of only some entries. Similarly,
53 * Iterators and Enumerations return elements reflecting the state of
54 * the hash table at some point at or since the creation of the
55 * iterator/enumeration. They do <em>not</em> throw {@link
56 * ConcurrentModificationException}. However, iterators are designed
57 * to be used by only one thread at a time. Bear in mind that the
58 * results of aggregate status methods including {@code size}, {@code
59 * isEmpty}, and {@code containsValue} are typically useful only when
60 * a map is not undergoing concurrent updates in other threads.
61 * Otherwise the results of these methods reflect transient states
62 * that may be adequate for monitoring or estimation purposes, but not
63 * for program control.
64 *
65 * <p>The table is dynamically expanded when there are too many
66 * collisions (i.e., keys that have distinct hash codes but fall into
67 * the same slot modulo the table size), with the expected average
68 * effect of maintaining roughly two bins per mapping (corresponding
69 * to a 0.75 load factor threshold for resizing). There may be much
70 * variance around this average as mappings are added and removed, but
71 * overall, this maintains a commonly accepted time/space tradeoff for
72 * hash tables. However, resizing this or any other kind of hash
73 * table may be a relatively slow operation. When possible, it is a
74 * good idea to provide a size estimate as an optional {@code
75 * initialCapacity} constructor argument. An additional optional
76 * {@code loadFactor} constructor argument provides a further means of
77 * customizing initial table capacity by specifying the table density
78 * to be used in calculating the amount of space to allocate for the
79 * given number of elements. Also, for compatibility with previous
80 * versions of this class, constructors may optionally specify an
81 * expected {@code concurrencyLevel} as an additional hint for
82 * internal sizing. Note that using many keys with exactly the same
83 * {@code hashCode()} is a sure way to slow down performance of any
84 * hash table. To ameliorate impact, when keys are {@link Comparable},
85 * this class may use comparison order among keys to help break ties.
86 *
87 * <p>A {@link Set} projection of a ConcurrentHashMapV8 may be created
88 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
89 * (using {@link #keySet(Object)}) when only keys are of interest, and the
90 * mapped values are (perhaps transiently) not used or all take the
91 * same mapping value.
92 *
93 * <p>This class and its views and iterators implement all of the
94 * <em>optional</em> methods of the {@link Map} and {@link Iterator}
95 * interfaces.
96 *
97 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
98 * does <em>not</em> allow {@code null} to be used as a key or value.
99 *
100 * <p>ConcurrentHashMapV8s support a set of sequential and parallel bulk
101 * operations that are designed
102 * to be safely, and often sensibly, applied even with maps that are
103 * being concurrently updated by other threads; for example, when
104 * computing a snapshot summary of the values in a shared registry.
105 * There are three kinds of operation, each with four forms, accepting
106 * functions with Keys, Values, Entries, and (Key, Value) arguments
107 * and/or return values. Because the elements of a ConcurrentHashMapV8
108 * are not ordered in any particular way, and may be processed in
109 * different orders in different parallel executions, the correctness
110 * of supplied functions should not depend on any ordering, or on any
111 * other objects or values that may transiently change while
112 * computation is in progress; and except for forEach actions, should
113 * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry}
114 * objects do not support method {@code setValue}.
115 *
116 * <ul>
117 * <li>forEach: Perform a given action on each element.
118 * A variant form applies a given transformation on each element
119 * before performing the action.
120 *
121 * <li>search: Return the first available non-null result of
122 * applying a given function on each element; skipping further
123 * search when a result is found.
124 *
125 * <li>reduce: Accumulate each element. The supplied reduction
126 * function cannot rely on ordering (more formally, it should be
127 * both associative and commutative). There are five variants:
128 *
129 * <ul>
130 *
131 * <li>Plain reductions. (There is not a form of this method for
132 * (key, value) function arguments since there is no corresponding
133 * return type.)
134 *
135 * <li>Mapped reductions that accumulate the results of a given
136 * function applied to each element.
137 *
138 * <li>Reductions to scalar doubles, longs, and ints, using a
139 * given basis value.
140 *
141 * </ul>
142 * </ul>
143 *
144 * <p>These bulk operations accept a {@code parallelismThreshold}
145 * argument. Methods proceed sequentially if the current map size is
146 * estimated to be less than the given threshold. Using a value of
147 * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value
148 * of {@code 1} results in maximal parallelism by partitioning into
149 * enough subtasks to fully utilize the {@link
150 * ForkJoinPool#commonPool()} that is used for all parallel
151 * computations. Normally, you would initially choose one of these
152 * extreme values, and then measure performance of using in-between
153 * values that trade off overhead versus throughput.
154 *
155 * <p>The concurrency properties of bulk operations follow
156 * from those of ConcurrentHashMapV8: Any non-null result returned
157 * from {@code get(key)} and related access methods bears a
158 * happens-before relation with the associated insertion or
159 * update. The result of any bulk operation reflects the
160 * composition of these per-element relations (but is not
161 * necessarily atomic with respect to the map as a whole unless it
162 * is somehow known to be quiescent). Conversely, because keys
163 * and values in the map are never null, null serves as a reliable
164 * atomic indicator of the current lack of any result. To
165 * maintain this property, null serves as an implicit basis for
166 * all non-scalar reduction operations. For the double, long, and
167 * int versions, the basis should be one that, when combined with
168 * any other value, returns that other value (more formally, it
169 * should be the identity element for the reduction). Most common
170 * reductions have these properties; for example, computing a sum
171 * with basis 0 or a minimum with basis MAX_VALUE.
172 *
173 * <p>Search and transformation functions provided as arguments
174 * should similarly return null to indicate the lack of any result
175 * (in which case it is not used). In the case of mapped
176 * reductions, this also enables transformations to serve as
177 * filters, returning null (or, in the case of primitive
178 * specializations, the identity basis) if the element should not
179 * be combined. You can create compound transformations and
180 * filterings by composing them yourself under this "null means
181 * there is nothing there now" rule before using them in search or
182 * reduce operations.
183 *
184 * <p>Methods accepting and/or returning Entry arguments maintain
185 * key-value associations. They may be useful for example when
186 * finding the key for the greatest value. Note that "plain" Entry
187 * arguments can be supplied using {@code new
188 * AbstractMap.SimpleEntry(k,v)}.
189 *
190 * <p>Bulk operations may complete abruptly, throwing an
191 * exception encountered in the application of a supplied
192 * function. Bear in mind when handling such exceptions that other
193 * concurrently executing functions could also have thrown
194 * exceptions, or would have done so if the first exception had
195 * not occurred.
196 *
197 * <p>Speedups for parallel compared to sequential forms are common
198 * but not guaranteed. Parallel operations involving brief functions
199 * on small maps may execute more slowly than sequential forms if the
200 * underlying work to parallelize the computation is more expensive
201 * than the computation itself. Similarly, parallelization may not
202 * lead to much actual parallelism if all processors are busy
203 * performing unrelated tasks.
204 *
205 * <p>All arguments to all task methods must be non-null.
206 *
207 * <p><em>jsr166e note: During transition, this class
208 * uses nested functional interfaces with different names but the
209 * same forms as those expected for JDK8.</em>
210 *
211 * <p>This class is a member of the
212 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
213 * Java Collections Framework</a>.
214 *
215 * @since 1.5
216 * @author Doug Lea
217 * @param <K> the type of keys maintained by this map
218 * @param <V> the type of mapped values
219 */
220 public class ConcurrentHashMapV8<K,V> extends AbstractMap<K,V>
221 implements ConcurrentMap<K,V>, Serializable {
222 private static final long serialVersionUID = 7249069246763182397L;
223
224 /**
225 * An object for traversing and partitioning elements of a source.
226 * This interface provides a subset of the functionality of JDK8
227 * java.util.Spliterator.
228 */
229 public static interface ConcurrentHashMapSpliterator<T> {
230 /**
231 * If possible, returns a new spliterator covering
232 * approximately one half of the elements, which will not be
233 * covered by this spliterator. Returns null if cannot be
234 * split.
235 */
236 ConcurrentHashMapSpliterator<T> trySplit();
237 /**
238 * Returns an estimate of the number of elements covered by
239 * this Spliterator.
240 */
241 long estimateSize();
242
243 /** Applies the action to each untraversed element */
244 void forEachRemaining(Action<? super T> action);
245 /** If an element remains, applies the action and returns true. */
246 boolean tryAdvance(Action<? super T> action);
247 }
248
249 // Sams
250 /** Interface describing a void action of one argument */
251 public interface Action<A> { void apply(A a); }
/** A void action of two arguments (JDK8 {@code BiConsumer} analog). */
public interface BiAction<A,B> { void apply(A a, B b); }
/** A function of one argument (JDK8 {@code Function} analog). */
public interface Fun<A,T> { T apply(A a); }
/** A function of two arguments (JDK8 {@code BiFunction} analog). */
public interface BiFun<A,B,T> { T apply(A a, B b); }
/** Maps its argument to a double (JDK8 {@code ToDoubleFunction} analog). */
public interface ObjectToDouble<A> { double apply(A a); }
/** Maps its argument to a long (JDK8 {@code ToLongFunction} analog). */
public interface ObjectToLong<A> { long apply(A a); }
/** Maps its argument to an int (JDK8 {@code ToIntFunction} analog). */
public interface ObjectToInt<A> { int apply(A a); }
264 /** Interface describing a function mapping two arguments to a double */
265 public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
266 /** Interface describing a function mapping two arguments to a long */
267 public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
268 /** Interface describing a function mapping two arguments to an int */
269 public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); }
270 /** Interface describing a function mapping two doubles to a double */
271 public interface DoubleByDoubleToDouble { double apply(double a, double b); }
272 /** Interface describing a function mapping two longs to a long */
273 public interface LongByLongToLong { long apply(long a, long b); }
274 /** Interface describing a function mapping two ints to an int */
275 public interface IntByIntToInt { int apply(int a, int b); }
276
277
278 /*
279 * Overview:
280 *
281 * The primary design goal of this hash table is to maintain
282 * concurrent readability (typically method get(), but also
283 * iterators and related methods) while minimizing update
284 * contention. Secondary goals are to keep space consumption about
285 * the same or better than java.util.HashMap, and to support high
286 * initial insertion rates on an empty table by many threads.
287 *
288 * This map usually acts as a binned (bucketed) hash table. Each
289 * key-value mapping is held in a Node. Most nodes are instances
290 * of the basic Node class with hash, key, value, and next
291 * fields. However, various subclasses exist: TreeNodes are
292 * arranged in balanced trees, not lists. TreeBins hold the roots
293 * of sets of TreeNodes. ForwardingNodes are placed at the heads
294 * of bins during resizing. ReservationNodes are used as
295 * placeholders while establishing values in computeIfAbsent and
296 * related methods. The types TreeBin, ForwardingNode, and
297 * ReservationNode do not hold normal user keys, values, or
298 * hashes, and are readily distinguishable during search etc
299 * because they have negative hash fields and null key and value
300 * fields. (These special nodes are either uncommon or transient,
301 * so the impact of carrying around some unused fields is
302 * insignificant.)
303 *
304 * The table is lazily initialized to a power-of-two size upon the
305 * first insertion. Each bin in the table normally contains a
306 * list of Nodes (most often, the list has only zero or one Node).
307 * Table accesses require volatile/atomic reads, writes, and
308 * CASes. Because there is no other way to arrange this without
309 * adding further indirections, we use intrinsics
310 * (sun.misc.Unsafe) operations.
311 *
312 * We use the top (sign) bit of Node hash fields for control
313 * purposes -- it is available anyway because of addressing
314 * constraints. Nodes with negative hash fields are specially
315 * handled or ignored in map methods.
316 *
317 * Insertion (via put or its variants) of the first node in an
318 * empty bin is performed by just CASing it to the bin. This is
319 * by far the most common case for put operations under most
320 * key/hash distributions. Other update operations (insert,
321 * delete, and replace) require locks. We do not want to waste
322 * the space required to associate a distinct lock object with
323 * each bin, so instead use the first node of a bin list itself as
324 * a lock. Locking support for these locks relies on builtin
325 * "synchronized" monitors.
326 *
327 * Using the first node of a list as a lock does not by itself
328 * suffice though: When a node is locked, any update must first
329 * validate that it is still the first node after locking it, and
330 * retry if not. Because new nodes are always appended to lists,
331 * once a node is first in a bin, it remains first until deleted
332 * or the bin becomes invalidated (upon resizing).
333 *
334 * The main disadvantage of per-bin locks is that other update
335 * operations on other nodes in a bin list protected by the same
336 * lock can stall, for example when user equals() or mapping
337 * functions take a long time. However, statistically, under
338 * random hash codes, this is not a common problem. Ideally, the
339 * frequency of nodes in bins follows a Poisson distribution
340 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
341 * parameter of about 0.5 on average, given the resizing threshold
342 * of 0.75, although with a large variance because of resizing
343 * granularity. Ignoring variance, the expected occurrences of
344 * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
345 * first values are:
346 *
347 * 0: 0.60653066
348 * 1: 0.30326533
349 * 2: 0.07581633
350 * 3: 0.01263606
351 * 4: 0.00157952
352 * 5: 0.00015795
353 * 6: 0.00001316
354 * 7: 0.00000094
355 * 8: 0.00000006
356 * more: less than 1 in ten million
357 *
358 * Lock contention probability for two threads accessing distinct
359 * elements is roughly 1 / (8 * #elements) under random hashes.
360 *
361 * Actual hash code distributions encountered in practice
362 * sometimes deviate significantly from uniform randomness. This
363 * includes the case when N > (1<<30), so some keys MUST collide.
364 * Similarly for dumb or hostile usages in which multiple keys are
365 * designed to have identical hash codes or ones that differ only
366 * in masked-out high bits. So we use a secondary strategy that
367 * applies when the number of nodes in a bin exceeds a
368 * threshold. These TreeBins use a balanced tree to hold nodes (a
369 * specialized form of red-black trees), bounding search time to
370 * O(log N). Each search step in a TreeBin is at least twice as
371 * slow as in a regular list, but given that N cannot exceed
372 * (1<<64) (before running out of addresses) this bounds search
373 * steps, lock hold times, etc, to reasonable constants (roughly
374 * 100 nodes inspected per operation worst case) so long as keys
375 * are Comparable (which is very common -- String, Long, etc).
376 * TreeBin nodes (TreeNodes) also maintain the same "next"
377 * traversal pointers as regular nodes, so can be traversed in
378 * iterators in the same way.
379 *
380 * The table is resized when occupancy exceeds a percentage
381 * threshold (nominally, 0.75, but see below). Any thread
382 * noticing an overfull bin may assist in resizing after the
383 * initiating thread allocates and sets up the replacement array.
384 * However, rather than stalling, these other threads may proceed
385 * with insertions etc. The use of TreeBins shields us from the
386 * worst case effects of overfilling while resizes are in
387 * progress. Resizing proceeds by transferring bins, one by one,
388 * from the table to the next table. However, threads claim small
389 * blocks of indices to transfer (via field transferIndex) before
390 * doing so, reducing contention. A generation stamp in field
391 * sizeCtl ensures that resizings do not overlap. Because we are
392 * using power-of-two expansion, the elements from each bin must
393 * either stay at same index, or move with a power of two
394 * offset. We eliminate unnecessary node creation by catching
395 * cases where old nodes can be reused because their next fields
396 * won't change. On average, only about one-sixth of them need
397 * cloning when a table doubles. The nodes they replace will be
398 * garbage collectable as soon as they are no longer referenced by
399 * any reader thread that may be in the midst of concurrently
400 * traversing table. Upon transfer, the old table bin contains
401 * only a special forwarding node (with hash field "MOVED") that
402 * contains the next table as its key. On encountering a
403 * forwarding node, access and update operations restart, using
404 * the new table.
405 *
406 * Each bin transfer requires its bin lock, which can stall
407 * waiting for locks while resizing. However, because other
408 * threads can join in and help resize rather than contend for
409 * locks, average aggregate waits become shorter as resizing
410 * progresses. The transfer operation must also ensure that all
411 * accessible bins in both the old and new table are usable by any
412 * traversal. This is arranged in part by proceeding from the
413 * last bin (table.length - 1) up towards the first. Upon seeing
414 * a forwarding node, traversals (see class Traverser) arrange to
415 * move to the new table without revisiting nodes. To ensure that
416 * no intervening nodes are skipped even when moved out of order,
417 * a stack (see class TableStack) is created on first encounter of
418 * a forwarding node during a traversal, to maintain its place if
419 * later processing the current table. The need for these
420 * save/restore mechanics is relatively rare, but when one
421 * forwarding node is encountered, typically many more will be.
422 * So Traversers use a simple caching scheme to avoid creating so
423 * many new TableStack nodes. (Thanks to Peter Levart for
424 * suggesting use of a stack here.)
425 *
426 * The traversal scheme also applies to partial traversals of
427 * ranges of bins (via an alternate Traverser constructor)
428 * to support partitioned aggregate operations. Also, read-only
429 * operations give up if ever forwarded to a null table, which
430 * provides support for shutdown-style clearing, which is also not
431 * currently implemented.
432 *
433 * Lazy table initialization minimizes footprint until first use,
434 * and also avoids resizings when the first operation is from a
435 * putAll, constructor with map argument, or deserialization.
436 * These cases attempt to override the initial capacity settings,
437 * but harmlessly fail to take effect in cases of races.
438 *
439 * The element count is maintained using a specialization of
440 * LongAdder. We need to incorporate a specialization rather than
441 * just use a LongAdder in order to access implicit
442 * contention-sensing that leads to creation of multiple
443 * CounterCells. The counter mechanics avoid contention on
444 * updates but can encounter cache thrashing if read too
445 * frequently during concurrent access. To avoid reading so often,
446 * resizing under contention is attempted only upon adding to a
447 * bin already holding two or more nodes. Under uniform hash
448 * distributions, the probability of this occurring at threshold
449 * is around 13%, meaning that only about 1 in 8 puts check
450 * threshold (and after resizing, many fewer do so).
451 *
452 * TreeBins use a special form of comparison for search and
453 * related operations (which is the main reason we cannot use
454 * existing collections such as TreeMaps). TreeBins contain
455 * Comparable elements, but may contain others, as well as
456 * elements that are Comparable but not necessarily Comparable for
457 * the same T, so we cannot invoke compareTo among them. To handle
458 * this, the tree is ordered primarily by hash value, then by
459 * Comparable.compareTo order if applicable. On lookup at a node,
460 * if elements are not comparable or compare as 0 then both left
461 * and right children may need to be searched in the case of tied
462 * hash values. (This corresponds to the full list search that
463 * would be necessary if all elements were non-Comparable and had
464 * tied hashes.) On insertion, to keep a total ordering (or as
465 * close as is required here) across rebalancings, we compare
466 * classes and identityHashCodes as tie-breakers. The red-black
467 * balancing code is updated from pre-jdk-collections
468 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
469 * based in turn on Cormen, Leiserson, and Rivest "Introduction to
470 * Algorithms" (CLR).
471 *
472 * TreeBins also require an additional locking mechanism. While
473 * list traversal is always possible by readers even during
474 * updates, tree traversal is not, mainly because of tree-rotations
475 * that may change the root node and/or its linkages. TreeBins
476 * include a simple read-write lock mechanism parasitic on the
477 * main bin-synchronization strategy: Structural adjustments
478 * associated with an insertion or removal are already bin-locked
479 * (and so cannot conflict with other writers) but must wait for
480 * ongoing readers to finish. Since there can be only one such
481 * waiter, we use a simple scheme using a single "waiter" field to
482 * block writers. However, readers need never block. If the root
483 * lock is held, they proceed along the slow traversal path (via
484 * next-pointers) until the lock becomes available or the list is
485 * exhausted, whichever comes first. These cases are not fast, but
486 * maximize aggregate expected throughput.
487 *
488 * Maintaining API and serialization compatibility with previous
489 * versions of this class introduces several oddities. Mainly: We
490 * leave untouched but unused constructor arguments referring to
491 * concurrencyLevel. We accept a loadFactor constructor argument,
492 * but apply it only to initial table capacity (which is the only
493 * time that we can guarantee to honor it.) We also declare an
494 * unused "Segment" class that is instantiated in minimal form
495 * only when serializing.
496 *
497 * Also, solely for compatibility with previous versions of this
498 * class, it extends AbstractMap, even though all of its methods
499 * are overridden, so it is just useless baggage.
500 *
501 * This file is organized to make things a little easier to follow
502 * while reading than they might otherwise: First the main static
503 * declarations and utilities, then fields, then main public
504 * methods (with a few factorings of multiple public methods into
505 * internal ones), then sizing methods, trees, traversers, and
506 * bulk operations.
507 */
508
509 /* ---------------- Constants -------------- */
510
511 /**
512 * The largest possible table capacity. This value must be
513 * exactly 1<<30 to stay within Java array allocation and indexing
514 * bounds for power of two table sizes, and is further required
515 * because the top two bits of 32bit hash fields are used for
516 * control purposes.
517 */
518 private static final int MAXIMUM_CAPACITY = 1 << 30;
519
520 /**
521 * The default initial table capacity. Must be a power of 2
522 * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
523 */
524 private static final int DEFAULT_CAPACITY = 16;
525
526 /**
527 * The largest possible (non-power of two) array size.
528 * Needed by toArray and related methods.
529 */
530 static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
531
532 /**
533 * The default concurrency level for this table. Unused but
534 * defined for compatibility with previous versions of this class.
535 */
536 private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
537
538 /**
539 * The load factor for this table. Overrides of this value in
540 * constructors affect only the initial table capacity. The
541 * actual floating point value isn't normally used -- it is
542 * simpler to use expressions such as {@code n - (n >>> 2)} for
543 * the associated resizing threshold.
544 */
545 private static final float LOAD_FACTOR = 0.75f;
546
547 /**
548 * The bin count threshold for using a tree rather than list for a
549 * bin. Bins are converted to trees when adding an element to a
550 * bin with at least this many nodes. The value must be greater
551 * than 2, and should be at least 8 to mesh with assumptions in
552 * tree removal about conversion back to plain bins upon
553 * shrinkage.
554 */
555 static final int TREEIFY_THRESHOLD = 8;
556
557 /**
558 * The bin count threshold for untreeifying a (split) bin during a
559 * resize operation. Should be less than TREEIFY_THRESHOLD, and at
560 * most 6 to mesh with shrinkage detection under removal.
561 */
562 static final int UNTREEIFY_THRESHOLD = 6;
563
564 /**
565 * The smallest table capacity for which bins may be treeified.
566 * (Otherwise the table is resized if too many nodes in a bin.)
567 * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
568 * conflicts between resizing and treeification thresholds.
569 */
570 static final int MIN_TREEIFY_CAPACITY = 64;
571
572 /**
573 * Minimum number of rebinnings per transfer step. Ranges are
574 * subdivided to allow multiple resizer threads. This value
575 * serves as a lower bound to avoid resizers encountering
576 * excessive memory contention. The value should be at least
577 * DEFAULT_CAPACITY.
578 */
579 private static final int MIN_TRANSFER_STRIDE = 16;
580
581 /**
582 * The number of bits used for generation stamp in sizeCtl.
583 * Must be at least 6 for 32bit arrays.
584 */
585 private static int RESIZE_STAMP_BITS = 16;
586
587 /**
588 * The maximum number of threads that can help resize.
589 * Must fit in 32 - RESIZE_STAMP_BITS bits.
590 */
591 private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
592
593 /**
594 * The bit shift for recording size stamp in sizeCtl.
595 */
596 private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
597
598 /*
599 * Encodings for Node hash fields. See above for explanation.
600 */
601 static final int MOVED = -1; // hash for forwarding nodes
602 static final int TREEBIN = -2; // hash for roots of trees
603 static final int RESERVED = -3; // hash for transient reservations
604 static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
605
606 /** Number of CPUS, to place bounds on some sizings */
607 static final int NCPU = Runtime.getRuntime().availableProcessors();
608
609 /** For serialization compatibility. */
610 private static final ObjectStreamField[] serialPersistentFields = {
611 new ObjectStreamField("segments", Segment[].class),
612 new ObjectStreamField("segmentMask", Integer.TYPE),
613 new ObjectStreamField("segmentShift", Integer.TYPE)
614 };
615
616 /* ---------------- Nodes -------------- */
617
618 /**
619 * Key-value entry. This class is never exported out as a
620 * user-mutable Map.Entry (i.e., one supporting setValue; see
621 * MapEntry below), but can be used for read-only traversals used
622 * in bulk tasks. Subclasses of Node with a negative hash field
623 * are special, and contain null keys and values (but are never
624 * exported). Otherwise, keys and vals are never null.
625 */
626 static class Node<K,V> implements Map.Entry<K,V> {
627 final int hash;
628 final K key;
629 volatile V val;
630 volatile Node<K,V> next;
631
632 Node(int hash, K key, V val, Node<K,V> next) {
633 this.hash = hash;
634 this.key = key;
635 this.val = val;
636 this.next = next;
637 }
638
639 public final K getKey() { return key; }
640 public final V getValue() { return val; }
641 public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
642 public final String toString() { return key + "=" + val; }
643 public final V setValue(V value) {
644 throw new UnsupportedOperationException();
645 }
646
647 public final boolean equals(Object o) {
648 Object k, v, u; Map.Entry<?,?> e;
649 return ((o instanceof Map.Entry) &&
650 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
651 (v = e.getValue()) != null &&
652 (k == key || k.equals(key)) &&
653 (v == (u = val) || v.equals(u)));
654 }
655
656 /**
657 * Virtualized support for map.get(); overridden in subclasses.
658 */
659 Node<K,V> find(int h, Object k) {
660 Node<K,V> e = this;
661 if (k != null) {
662 do {
663 K ek;
664 if (e.hash == h &&
665 ((ek = e.key) == k || (ek != null && k.equals(ek))))
666 return e;
667 } while ((e = e.next) != null);
668 }
669 return null;
670 }
671 }
672
673 /* ---------------- Static utilities -------------- */
674
675 /**
676 * Spreads (XORs) higher bits of hash to lower and also forces top
677 * bit to 0. Because the table uses power-of-two masking, sets of
678 * hashes that vary only in bits above the current mask will
679 * always collide. (Among known examples are sets of Float keys
680 * holding consecutive whole numbers in small tables.) So we
681 * apply a transform that spreads the impact of higher bits
682 * downward. There is a tradeoff between speed, utility, and
683 * quality of bit-spreading. Because many common sets of hashes
684 * are already reasonably distributed (so don't benefit from
685 * spreading), and because we use trees to handle large sets of
686 * collisions in bins, we just XOR some shifted bits in the
687 * cheapest possible way to reduce systematic lossage, as well as
688 * to incorporate impact of the highest bits that would otherwise
689 * never be used in index calculations because of table bounds.
690 */
691 static final int spread(int h) {
692 return (h ^ (h >>> 16)) & HASH_BITS;
693 }
694
695 /**
696 * Returns a power of two table size for the given desired capacity.
697 * See Hackers Delight, sec 3.2
698 */
699 private static final int tableSizeFor(int c) {
700 int n = c - 1;
701 n |= n >>> 1;
702 n |= n >>> 2;
703 n |= n >>> 4;
704 n |= n >>> 8;
705 n |= n >>> 16;
706 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
707 }
708
709 /**
710 * Returns x's Class if it is of the form "class C implements
711 * Comparable<C>", else null.
712 */
713 static Class<?> comparableClassFor(Object x) {
714 if (x instanceof Comparable) {
715 Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
716 if ((c = x.getClass()) == String.class) // bypass checks
717 return c;
718 if ((ts = c.getGenericInterfaces()) != null) {
719 for (int i = 0; i < ts.length; ++i) {
720 if (((t = ts[i]) instanceof ParameterizedType) &&
721 ((p = (ParameterizedType)t).getRawType() ==
722 Comparable.class) &&
723 (as = p.getActualTypeArguments()) != null &&
724 as.length == 1 && as[0] == c) // type arg is c
725 return c;
726 }
727 }
728 }
729 return null;
730 }
731
732 /**
733 * Returns k.compareTo(x) if x matches kc (k's screened comparable
734 * class), else 0.
735 */
736 @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
737 static int compareComparables(Class<?> kc, Object k, Object x) {
738 return (x == null || x.getClass() != kc ? 0 :
739 ((Comparable)k).compareTo(x));
740 }
741
/* ---------------- Table element access -------------- */

/*
 * Volatile access methods are used for table elements as well as
 * elements of in-progress next table while resizing. All uses of
 * the tab arguments must be null checked by callers. All callers
 * also paranoically precheck that tab's length is not zero (or an
 * equivalent check), thus ensuring that any index argument taking
 * the form of a hash value anded with (length - 1) is a valid
 * index. Note that, to be correct wrt arbitrary concurrency
 * errors by users, these checks must operate on local variables,
 * which accounts for some odd-looking inline assignments below.
 * Note that calls to setTabAt always occur within locked regions,
 * and so in principle require only release ordering, not
 * full volatile semantics, but are currently coded as volatile
 * writes to be conservative.
 */

// Volatile read of tab[i]; ((long)i << ASHIFT) + ABASE converts the
// array index into a raw byte offset for Unsafe.
@SuppressWarnings("unchecked")
static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
    return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
}

// CAS of tab[i] from expected value c to v; returns true on success.
static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                    Node<K,V> c, Node<K,V> v) {
    return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
}

// Volatile write of tab[i] = v; only called while holding the bin lock.
static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
    U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
}
773
/* ---------------- Fields -------------- */

/**
 * The array of bins. Lazily initialized upon first insertion.
 * Size is always a power of two. Accessed directly by iterators.
 */
transient volatile Node<K,V>[] table;

/**
 * The next table to use; non-null only while resizing.
 */
private transient volatile Node<K,V>[] nextTable;

/**
 * Base counter value, used mainly when there is no contention,
 * but also as a fallback during table initialization
 * races. Updated via CAS.
 */
private transient volatile long baseCount;

/**
 * Table initialization and resizing control. When negative, the
 * table is being initialized or resized: -1 for initialization,
 * else -(1 + the number of active resizing threads). Otherwise,
 * when table is null, holds the initial table size to use upon
 * creation, or 0 for default. After initialization, holds the
 * next element count value upon which to resize the table.
 */
private transient volatile int sizeCtl;

/**
 * The next table index (plus one) to split while resizing.
 */
private transient volatile int transferIndex;

/**
 * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
 */
private transient volatile int cellsBusy;

/**
 * Table of counter cells. When non-null, size is a power of 2.
 */
private transient volatile CounterCell[] counterCells;

// Collection views, created lazily and unsynchronized in their
// accessors; a race may transiently build duplicates, which is benign
// since the views are stateless wrappers around this map.
private transient KeySetView<K,V> keySet;
private transient ValuesView<K,V> values;
private transient EntrySetView<K,V> entrySet;

824
/* ---------------- Public operations -------------- */

/**
 * Creates a new, empty map with the default initial table size (16).
 * The table itself is allocated lazily on first insertion
 * (sizeCtl is left at 0, meaning "use the default size").
 */
public ConcurrentHashMapV8() {
}
832
833 /**
834 * Creates a new, empty map with an initial table size
835 * accommodating the specified number of elements without the need
836 * to dynamically resize.
837 *
838 * @param initialCapacity The implementation performs internal
839 * sizing to accommodate this many elements.
840 * @throws IllegalArgumentException if the initial capacity of
841 * elements is negative
842 */
843 public ConcurrentHashMapV8(int initialCapacity) {
844 if (initialCapacity < 0)
845 throw new IllegalArgumentException();
846 int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
847 MAXIMUM_CAPACITY :
848 tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
849 this.sizeCtl = cap;
850 }
851
852 /**
853 * Creates a new map with the same mappings as the given map.
854 *
855 * @param m the map
856 */
857 public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
858 this.sizeCtl = DEFAULT_CAPACITY;
859 putAll(m);
860 }
861
862 /**
863 * Creates a new, empty map with an initial table size based on
864 * the given number of elements ({@code initialCapacity}) and
865 * initial table density ({@code loadFactor}).
866 *
867 * @param initialCapacity the initial capacity. The implementation
868 * performs internal sizing to accommodate this many elements,
869 * given the specified load factor.
870 * @param loadFactor the load factor (table density) for
871 * establishing the initial table size
872 * @throws IllegalArgumentException if the initial capacity of
873 * elements is negative or the load factor is nonpositive
874 *
875 * @since 1.6
876 */
877 public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
878 this(initialCapacity, loadFactor, 1);
879 }
880
881 /**
882 * Creates a new, empty map with an initial table size based on
883 * the given number of elements ({@code initialCapacity}), table
884 * density ({@code loadFactor}), and number of concurrently
885 * updating threads ({@code concurrencyLevel}).
886 *
887 * @param initialCapacity the initial capacity. The implementation
888 * performs internal sizing to accommodate this many elements,
889 * given the specified load factor.
890 * @param loadFactor the load factor (table density) for
891 * establishing the initial table size
892 * @param concurrencyLevel the estimated number of concurrently
893 * updating threads. The implementation may use this value as
894 * a sizing hint.
895 * @throws IllegalArgumentException if the initial capacity is
896 * negative or the load factor or concurrencyLevel are
897 * nonpositive
898 */
899 public ConcurrentHashMapV8(int initialCapacity,
900 float loadFactor, int concurrencyLevel) {
901 if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
902 throw new IllegalArgumentException();
903 if (initialCapacity < concurrencyLevel) // Use at least as many bins
904 initialCapacity = concurrencyLevel; // as estimated threads
905 long size = (long)(1.0 + (long)initialCapacity / loadFactor);
906 int cap = (size >= (long)MAXIMUM_CAPACITY) ?
907 MAXIMUM_CAPACITY : tableSizeFor((int)size);
908 this.sizeCtl = cap;
909 }
910
911 // Original (since JDK1.2) Map methods
912
913 /**
914 * {@inheritDoc}
915 */
916 public int size() {
917 long n = sumCount();
918 return ((n < 0L) ? 0 :
919 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
920 (int)n);
921 }
922
923 /**
924 * {@inheritDoc}
925 */
926 public boolean isEmpty() {
927 return sumCount() <= 0L; // ignore transient negative values
928 }
929
930 /**
931 * Returns the value to which the specified key is mapped,
932 * or {@code null} if this map contains no mapping for the key.
933 *
934 * <p>More formally, if this map contains a mapping from a key
935 * {@code k} to a value {@code v} such that {@code key.equals(k)},
936 * then this method returns {@code v}; otherwise it returns
937 * {@code null}. (There can be at most one such mapping.)
938 *
939 * @throws NullPointerException if the specified key is null
940 */
941 public V get(Object key) {
942 Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
943 int h = spread(key.hashCode());
944 if ((tab = table) != null && (n = tab.length) > 0 &&
945 (e = tabAt(tab, (n - 1) & h)) != null) {
946 if ((eh = e.hash) == h) {
947 if ((ek = e.key) == key || (ek != null && key.equals(ek)))
948 return e.val;
949 }
950 else if (eh < 0)
951 return (p = e.find(h, key)) != null ? p.val : null;
952 while ((e = e.next) != null) {
953 if (e.hash == h &&
954 ((ek = e.key) == key || (ek != null && key.equals(ek))))
955 return e.val;
956 }
957 }
958 return null;
959 }
960
961 /**
962 * Tests if the specified object is a key in this table.
963 *
964 * @param key possible key
965 * @return {@code true} if and only if the specified object
966 * is a key in this table, as determined by the
967 * {@code equals} method; {@code false} otherwise
968 * @throws NullPointerException if the specified key is null
969 */
970 public boolean containsKey(Object key) {
971 return get(key) != null;
972 }
973
974 /**
975 * Returns {@code true} if this map maps one or more keys to the
976 * specified value. Note: This method may require a full traversal
977 * of the map, and is much slower than method {@code containsKey}.
978 *
979 * @param value value whose presence in this map is to be tested
980 * @return {@code true} if this map maps one or more keys to the
981 * specified value
982 * @throws NullPointerException if the specified value is null
983 */
984 public boolean containsValue(Object value) {
985 if (value == null)
986 throw new NullPointerException();
987 Node<K,V>[] t;
988 if ((t = table) != null) {
989 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
990 for (Node<K,V> p; (p = it.advance()) != null; ) {
991 V v;
992 if ((v = p.val) == value || (v != null && value.equals(v)))
993 return true;
994 }
995 }
996 return false;
997 }
998
999 /**
1000 * Maps the specified key to the specified value in this table.
1001 * Neither the key nor the value can be null.
1002 *
1003 * <p>The value can be retrieved by calling the {@code get} method
1004 * with a key that is equal to the original key.
1005 *
1006 * @param key key with which the specified value is to be associated
1007 * @param value value to be associated with the specified key
1008 * @return the previous value associated with {@code key}, or
1009 * {@code null} if there was no mapping for {@code key}
1010 * @throws NullPointerException if the specified key or value is null
1011 */
1012 public V put(K key, V value) {
1013 return putVal(key, value, false);
1014 }
1015
/**
 * Implementation for put and putIfAbsent.
 *
 * Retries until one of three outcomes: a CAS publishes a new node
 * into an empty bin; the value is inserted/updated under the bin
 * head's monitor lock; or (if onlyIfAbsent) an existing mapping is
 * found and returned unchanged.
 *
 * @param key the key (never null)
 * @param value the value (never null)
 * @param onlyIfAbsent if true, do not overwrite an existing value
 * @return the previous value, or null if there was none
 */
final V putVal(K key, V value, boolean onlyIfAbsent) {
    if (key == null || value == null) throw new NullPointerException();
    int hash = spread(key.hashCode());
    int binCount = 0;   // nodes seen in the bin; drives treeification
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        if (tab == null || (n = tab.length) == 0)
            tab = initTable();
        else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
            if (casTabAt(tab, i, null,
                         new Node<K,V>(hash, key, value, null)))
                break;                   // no lock when adding to empty bin
        }
        else if ((fh = f.hash) == MOVED)
            // Table is being resized; help move bins, then retry.
            tab = helpTransfer(tab, f);
        else {
            V oldVal = null;
            // Lock the bin head; recheck it is still the head before
            // mutating, since a concurrent resize/remove may have
            // replaced it.
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) {
                        // Plain linked bin: scan for the key, else append.
                        binCount = 1;
                        for (Node<K,V> e = f;; ++binCount) {
                            K ek;
                            if (e.hash == hash &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                oldVal = e.val;
                                if (!onlyIfAbsent)
                                    e.val = value;
                                break;
                            }
                            Node<K,V> pred = e;
                            if ((e = e.next) == null) {
                                pred.next = new Node<K,V>(hash, key,
                                                          value, null);
                                break;
                            }
                        }
                    }
                    else if (f instanceof TreeBin) {
                        Node<K,V> p;
                        binCount = 2;  // any nonzero value below threshold
                        if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                              value)) != null) {
                            oldVal = p.val;
                            if (!onlyIfAbsent)
                                p.val = value;
                        }
                    }
                }
            }
            if (binCount != 0) {
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i);
                if (oldVal != null)
                    return oldVal;   // replaced: count unchanged
                break;
            }
        }
    }
    addCount(1L, binCount);  // new mapping: bump count, maybe resize
    return null;
}
1080
1081 /**
1082 * Copies all of the mappings from the specified map to this one.
1083 * These mappings replace any mappings that this map had for any of the
1084 * keys currently in the specified map.
1085 *
1086 * @param m mappings to be stored in this map
1087 */
1088 public void putAll(Map<? extends K, ? extends V> m) {
1089 tryPresize(m.size());
1090 for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1091 putVal(e.getKey(), e.getValue(), false);
1092 }
1093
1094 /**
1095 * Removes the key (and its corresponding value) from this map.
1096 * This method does nothing if the key is not in the map.
1097 *
1098 * @param key the key that needs to be removed
1099 * @return the previous value associated with {@code key}, or
1100 * {@code null} if there was no mapping for {@code key}
1101 * @throws NullPointerException if the specified key is null
1102 */
1103 public V remove(Object key) {
1104 return replaceNode(key, null, null);
1105 }
1106
1107 /**
1108 * Implementation for the four public remove/replace methods:
1109 * Replaces node value with v, conditional upon match of cv if
1110 * non-null. If resulting value is null, delete.
1111 */
1112 final V replaceNode(Object key, V value, Object cv) {
1113 int hash = spread(key.hashCode());
1114 for (Node<K,V>[] tab = table;;) {
1115 Node<K,V> f; int n, i, fh;
1116 if (tab == null || (n = tab.length) == 0 ||
1117 (f = tabAt(tab, i = (n - 1) & hash)) == null)
1118 break;
1119 else if ((fh = f.hash) == MOVED)
1120 tab = helpTransfer(tab, f);
1121 else {
1122 V oldVal = null;
1123 boolean validated = false;
1124 synchronized (f) {
1125 if (tabAt(tab, i) == f) {
1126 if (fh >= 0) {
1127 validated = true;
1128 for (Node<K,V> e = f, pred = null;;) {
1129 K ek;
1130 if (e.hash == hash &&
1131 ((ek = e.key) == key ||
1132 (ek != null && key.equals(ek)))) {
1133 V ev = e.val;
1134 if (cv == null || cv == ev ||
1135 (ev != null && cv.equals(ev))) {
1136 oldVal = ev;
1137 if (value != null)
1138 e.val = value;
1139 else if (pred != null)
1140 pred.next = e.next;
1141 else
1142 setTabAt(tab, i, e.next);
1143 }
1144 break;
1145 }
1146 pred = e;
1147 if ((e = e.next) == null)
1148 break;
1149 }
1150 }
1151 else if (f instanceof TreeBin) {
1152 validated = true;
1153 TreeBin<K,V> t = (TreeBin<K,V>)f;
1154 TreeNode<K,V> r, p;
1155 if ((r = t.root) != null &&
1156 (p = r.findTreeNode(hash, key, null)) != null) {
1157 V pv = p.val;
1158 if (cv == null || cv == pv ||
1159 (pv != null && cv.equals(pv))) {
1160 oldVal = pv;
1161 if (value != null)
1162 p.val = value;
1163 else if (t.removeTreeNode(p))
1164 setTabAt(tab, i, untreeify(t.first));
1165 }
1166 }
1167 }
1168 }
1169 }
1170 if (validated) {
1171 if (oldVal != null) {
1172 if (value == null)
1173 addCount(-1L, -1);
1174 return oldVal;
1175 }
1176 break;
1177 }
1178 }
1179 }
1180 return null;
1181 }
1182
1183 /**
1184 * Removes all of the mappings from this map.
1185 */
1186 public void clear() {
1187 long delta = 0L; // negative number of deletions
1188 int i = 0;
1189 Node<K,V>[] tab = table;
1190 while (tab != null && i < tab.length) {
1191 int fh;
1192 Node<K,V> f = tabAt(tab, i);
1193 if (f == null)
1194 ++i;
1195 else if ((fh = f.hash) == MOVED) {
1196 tab = helpTransfer(tab, f);
1197 i = 0; // restart
1198 }
1199 else {
1200 synchronized (f) {
1201 if (tabAt(tab, i) == f) {
1202 Node<K,V> p = (fh >= 0 ? f :
1203 (f instanceof TreeBin) ?
1204 ((TreeBin<K,V>)f).first : null);
1205 while (p != null) {
1206 --delta;
1207 p = p.next;
1208 }
1209 setTabAt(tab, i++, null);
1210 }
1211 }
1212 }
1213 }
1214 if (delta != 0L)
1215 addCount(delta, -1);
1216 }
1217
1218 /**
1219 * Returns a {@link Set} view of the keys contained in this map.
1220 * The set is backed by the map, so changes to the map are
1221 * reflected in the set, and vice-versa. The set supports element
1222 * removal, which removes the corresponding mapping from this map,
1223 * via the {@code Iterator.remove}, {@code Set.remove},
1224 * {@code removeAll}, {@code retainAll}, and {@code clear}
1225 * operations. It does not support the {@code add} or
1226 * {@code addAll} operations.
1227 *
1228 * <p>The view's {@code iterator} is a "weakly consistent" iterator
1229 * that will never throw {@link ConcurrentModificationException},
1230 * and guarantees to traverse elements as they existed upon
1231 * construction of the iterator, and may (but is not guaranteed to)
1232 * reflect any modifications subsequent to construction.
1233 *
1234 * @return the set view
1235 */
1236 public KeySetView<K,V> keySet() {
1237 KeySetView<K,V> ks;
1238 return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
1239 }
1240
1241 /**
1242 * Returns a {@link Collection} view of the values contained in this map.
1243 * The collection is backed by the map, so changes to the map are
1244 * reflected in the collection, and vice-versa. The collection
1245 * supports element removal, which removes the corresponding
1246 * mapping from this map, via the {@code Iterator.remove},
1247 * {@code Collection.remove}, {@code removeAll},
1248 * {@code retainAll}, and {@code clear} operations. It does not
1249 * support the {@code add} or {@code addAll} operations.
1250 *
1251 * <p>The view's {@code iterator} is a "weakly consistent" iterator
1252 * that will never throw {@link ConcurrentModificationException},
1253 * and guarantees to traverse elements as they existed upon
1254 * construction of the iterator, and may (but is not guaranteed to)
1255 * reflect any modifications subsequent to construction.
1256 *
1257 * @return the collection view
1258 */
1259 public Collection<V> values() {
1260 ValuesView<K,V> vs;
1261 return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
1262 }
1263
1264 /**
1265 * Returns a {@link Set} view of the mappings contained in this map.
1266 * The set is backed by the map, so changes to the map are
1267 * reflected in the set, and vice-versa. The set supports element
1268 * removal, which removes the corresponding mapping from the map,
1269 * via the {@code Iterator.remove}, {@code Set.remove},
1270 * {@code removeAll}, {@code retainAll}, and {@code clear}
1271 * operations.
1272 *
1273 * <p>The view's {@code iterator} is a "weakly consistent" iterator
1274 * that will never throw {@link ConcurrentModificationException},
1275 * and guarantees to traverse elements as they existed upon
1276 * construction of the iterator, and may (but is not guaranteed to)
1277 * reflect any modifications subsequent to construction.
1278 *
1279 * @return the set view
1280 */
1281 public Set<Map.Entry<K,V>> entrySet() {
1282 EntrySetView<K,V> es;
1283 return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
1284 }
1285
1286 /**
1287 * Returns the hash code value for this {@link Map}, i.e.,
1288 * the sum of, for each key-value pair in the map,
1289 * {@code key.hashCode() ^ value.hashCode()}.
1290 *
1291 * @return the hash code value for this map
1292 */
1293 public int hashCode() {
1294 int h = 0;
1295 Node<K,V>[] t;
1296 if ((t = table) != null) {
1297 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1298 for (Node<K,V> p; (p = it.advance()) != null; )
1299 h += p.key.hashCode() ^ p.val.hashCode();
1300 }
1301 return h;
1302 }
1303
1304 /**
1305 * Returns a string representation of this map. The string
1306 * representation consists of a list of key-value mappings (in no
1307 * particular order) enclosed in braces ("{@code {}}"). Adjacent
1308 * mappings are separated by the characters {@code ", "} (comma
1309 * and space). Each key-value mapping is rendered as the key
1310 * followed by an equals sign ("{@code =}") followed by the
1311 * associated value.
1312 *
1313 * @return a string representation of this map
1314 */
1315 public String toString() {
1316 Node<K,V>[] t;
1317 int f = (t = table) == null ? 0 : t.length;
1318 Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1319 StringBuilder sb = new StringBuilder();
1320 sb.append('{');
1321 Node<K,V> p;
1322 if ((p = it.advance()) != null) {
1323 for (;;) {
1324 K k = p.key;
1325 V v = p.val;
1326 sb.append(k == this ? "(this Map)" : k);
1327 sb.append('=');
1328 sb.append(v == this ? "(this Map)" : v);
1329 if ((p = it.advance()) == null)
1330 break;
1331 sb.append(',').append(' ');
1332 }
1333 }
1334 return sb.append('}').toString();
1335 }
1336
1337 /**
1338 * Compares the specified object with this map for equality.
1339 * Returns {@code true} if the given object is a map with the same
1340 * mappings as this map. This operation may return misleading
1341 * results if either map is concurrently modified during execution
1342 * of this method.
1343 *
1344 * @param o object to be compared for equality with this map
1345 * @return {@code true} if the specified object is equal to this map
1346 */
1347 public boolean equals(Object o) {
1348 if (o != this) {
1349 if (!(o instanceof Map))
1350 return false;
1351 Map<?,?> m = (Map<?,?>) o;
1352 Node<K,V>[] t;
1353 int f = (t = table) == null ? 0 : t.length;
1354 Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1355 for (Node<K,V> p; (p = it.advance()) != null; ) {
1356 V val = p.val;
1357 Object v = m.get(p.key);
1358 if (v == null || (v != val && !v.equals(val)))
1359 return false;
1360 }
1361 for (Map.Entry<?,?> e : m.entrySet()) {
1362 Object mk, mv, v;
1363 if ((mk = e.getKey()) == null ||
1364 (mv = e.getValue()) == null ||
1365 (v = get(mk)) == null ||
1366 (mv != v && !mv.equals(v)))
1367 return false;
1368 }
1369 }
1370 return true;
1371 }
1372
1373 /**
1374 * Stripped-down version of helper class used in previous version,
1375 * declared for the sake of serialization compatibility
1376 */
1377 static class Segment<K,V> extends ReentrantLock implements Serializable {
1378 private static final long serialVersionUID = 2249069246763182397L;
1379 final float loadFactor;
1380 Segment(float lf) { this.loadFactor = lf; }
1381 }
1382
1383 /**
1384 * Saves the state of the {@code ConcurrentHashMapV8} instance to a
1385 * stream (i.e., serializes it).
1386 * @param s the stream
1387 * @throws java.io.IOException if an I/O error occurs
1388 * @serialData
1389 * the key (Object) and value (Object)
1390 * for each key-value mapping, followed by a null pair.
1391 * The key-value mappings are emitted in no particular order.
1392 */
1393 private void writeObject(java.io.ObjectOutputStream s)
1394 throws java.io.IOException {
1395 // For serialization compatibility
1396 // Emulate segment calculation from previous version of this class
1397 int sshift = 0;
1398 int ssize = 1;
1399 while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
1400 ++sshift;
1401 ssize <<= 1;
1402 }
1403 int segmentShift = 32 - sshift;
1404 int segmentMask = ssize - 1;
1405 @SuppressWarnings("unchecked") Segment<K,V>[] segments = (Segment<K,V>[])
1406 new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
1407 for (int i = 0; i < segments.length; ++i)
1408 segments[i] = new Segment<K,V>(LOAD_FACTOR);
1409 s.putFields().put("segments", segments);
1410 s.putFields().put("segmentShift", segmentShift);
1411 s.putFields().put("segmentMask", segmentMask);
1412 s.writeFields();
1413
1414 Node<K,V>[] t;
1415 if ((t = table) != null) {
1416 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1417 for (Node<K,V> p; (p = it.advance()) != null; ) {
1418 s.writeObject(p.key);
1419 s.writeObject(p.val);
1420 }
1421 }
1422 s.writeObject(null);
1423 s.writeObject(null);
1424 segments = null; // throw away
1425 }
1426
1427 /**
1428 * Reconstitutes the instance from a stream (that is, deserializes it).
1429 * @param s the stream
1430 * @throws ClassNotFoundException if the class of a serialized object
1431 * could not be found
1432 * @throws java.io.IOException if an I/O error occurs
1433 */
1434 private void readObject(java.io.ObjectInputStream s)
1435 throws java.io.IOException, ClassNotFoundException {
1436 /*
1437 * To improve performance in typical cases, we create nodes
1438 * while reading, then place in table once size is known.
1439 * However, we must also validate uniqueness and deal with
1440 * overpopulated bins while doing so, which requires
1441 * specialized versions of putVal mechanics.
1442 */
1443 sizeCtl = -1; // force exclusion for table construction
1444 s.defaultReadObject();
1445 long size = 0L;
1446 Node<K,V> p = null;
1447 for (;;) {
1448 @SuppressWarnings("unchecked") K k = (K) s.readObject();
1449 @SuppressWarnings("unchecked") V v = (V) s.readObject();
1450 if (k != null && v != null) {
1451 p = new Node<K,V>(spread(k.hashCode()), k, v, p);
1452 ++size;
1453 }
1454 else
1455 break;
1456 }
1457 if (size == 0L)
1458 sizeCtl = 0;
1459 else {
1460 int n;
1461 if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
1462 n = MAXIMUM_CAPACITY;
1463 else {
1464 int sz = (int)size;
1465 n = tableSizeFor(sz + (sz >>> 1) + 1);
1466 }
1467 @SuppressWarnings("unchecked")
1468 Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
1469 int mask = n - 1;
1470 long added = 0L;
1471 while (p != null) {
1472 boolean insertAtFront;
1473 Node<K,V> next = p.next, first;
1474 int h = p.hash, j = h & mask;
1475 if ((first = tabAt(tab, j)) == null)
1476 insertAtFront = true;
1477 else {
1478 K k = p.key;
1479 if (first.hash < 0) {
1480 TreeBin<K,V> t = (TreeBin<K,V>)first;
1481 if (t.putTreeVal(h, k, p.val) == null)
1482 ++added;
1483 insertAtFront = false;
1484 }
1485 else {
1486 int binCount = 0;
1487 insertAtFront = true;
1488 Node<K,V> q; K qk;
1489 for (q = first; q != null; q = q.next) {
1490 if (q.hash == h &&
1491 ((qk = q.key) == k ||
1492 (qk != null && k.equals(qk)))) {
1493 insertAtFront = false;
1494 break;
1495 }
1496 ++binCount;
1497 }
1498 if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
1499 insertAtFront = false;
1500 ++added;
1501 p.next = first;
1502 TreeNode<K,V> hd = null, tl = null;
1503 for (q = p; q != null; q = q.next) {
1504 TreeNode<K,V> t = new TreeNode<K,V>
1505 (q.hash, q.key, q.val, null, null);
1506 if ((t.prev = tl) == null)
1507 hd = t;
1508 else
1509 tl.next = t;
1510 tl = t;
1511 }
1512 setTabAt(tab, j, new TreeBin<K,V>(hd));
1513 }
1514 }
1515 }
1516 if (insertAtFront) {
1517 ++added;
1518 p.next = first;
1519 setTabAt(tab, j, p);
1520 }
1521 p = next;
1522 }
1523 table = tab;
1524 sizeCtl = n - (n >>> 2);
1525 baseCount = added;
1526 }
1527 }
1528
// ConcurrentMap methods

/**
 * {@inheritDoc}
 *
 * @return the previous value associated with the specified key,
 * or {@code null} if there was no mapping for the key
 * @throws NullPointerException if the specified key or value is null
 */
public V putIfAbsent(K key, V value) {
    return putVal(key, value, true); // true: keep an existing value
}
1541
1542 /**
1543 * {@inheritDoc}
1544 *
1545 * @throws NullPointerException if the specified key is null
1546 */
1547 public boolean remove(Object key, Object value) {
1548 if (key == null)
1549 throw new NullPointerException();
1550 return value != null && replaceNode(key, null, value) != null;
1551 }
1552
1553 /**
1554 * {@inheritDoc}
1555 *
1556 * @throws NullPointerException if any of the arguments are null
1557 */
1558 public boolean replace(K key, V oldValue, V newValue) {
1559 if (key == null || oldValue == null || newValue == null)
1560 throw new NullPointerException();
1561 return replaceNode(key, newValue, oldValue) != null;
1562 }
1563
1564 /**
1565 * {@inheritDoc}
1566 *
1567 * @return the previous value associated with the specified key,
1568 * or {@code null} if there was no mapping for the key
1569 * @throws NullPointerException if the specified key or value is null
1570 */
1571 public V replace(K key, V value) {
1572 if (key == null || value == null)
1573 throw new NullPointerException();
1574 return replaceNode(key, value, null);
1575 }
1576
1577 // Overrides of JDK8+ Map extension method defaults
1578
1579 /**
1580 * Returns the value to which the specified key is mapped, or the
1581 * given default value if this map contains no mapping for the
1582 * key.
1583 *
1584 * @param key the key whose associated value is to be returned
1585 * @param defaultValue the value to return if this map contains
1586 * no mapping for the given key
1587 * @return the mapping for the key, if present; else the default value
1588 * @throws NullPointerException if the specified key is null
1589 */
1590 public V getOrDefault(Object key, V defaultValue) {
1591 V v;
1592 return (v = get(key)) == null ? defaultValue : v;
1593 }
1594
1595 public void forEach(BiAction<? super K, ? super V> action) {
1596 if (action == null) throw new NullPointerException();
1597 Node<K,V>[] t;
1598 if ((t = table) != null) {
1599 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1600 for (Node<K,V> p; (p = it.advance()) != null; ) {
1601 action.apply(p.key, p.val);
1602 }
1603 }
1604 }
1605
/**
 * Replaces each entry's value with the result of invoking the
 * given function on that entry's key and current value, retrying
 * per-key until the CAS-like replace succeeds or the mapping
 * disappears.
 *
 * @param function computes the new value from (key, oldValue);
 *        must not be null and must not return null
 * @throws NullPointerException if the function or any computed
 *         replacement value is null
 */
public void replaceAll(BiFun<? super K, ? super V, ? extends V> function) {
    if (function == null) throw new NullPointerException();
    Node<K,V>[] t;
    if ((t = table) != null) {
        Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
        for (Node<K,V> p; (p = it.advance()) != null; ) {
            V oldValue = p.val;
            for (K key = p.key;;) {
                V newValue = function.apply(key, oldValue);
                if (newValue == null)
                    throw new NullPointerException();
                // Succeeds only if the value is still oldValue; on
                // interference, reload and retry, or stop if removed.
                if (replaceNode(key, newValue, oldValue) != null ||
                    (oldValue = get(key)) == null)
                    break;
            }
        }
    }
}
1624
    /**
     * If the specified key is not already associated with a value,
     * attempts to compute its value using the given mapping function
     * and enters it into this map unless {@code null}. The entire
     * method invocation is performed atomically, so the function is
     * applied at most once per key. Some attempted update operations
     * on this map by other threads may be blocked while computation
     * is in progress, so the computation should be short and simple,
     * and must not attempt to update any other mappings of this map.
     *
     * @param key key with which the specified value is to be associated
     * @param mappingFunction the function to compute a value
     * @return the current (existing or computed) value associated with
     *         the specified key, or null if the computed value is null
     * @throws NullPointerException if the specified key or mappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the mappingFunction does so,
     *         in which case the mapping is left unestablished
     */
    public V computeIfAbsent(K key, Fun<? super K, ? extends V> mappingFunction) {
        if (key == null || mappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int binCount = 0;   // nodes touched in the bin; 0 means "retry outer loop"
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: install a ReservationNode as a placeholder lock
                // so other writers wait while the mapping function runs.
                Node<K,V> r = new ReservationNode<K,V>();
                synchronized (r) {
                    if (casTabAt(tab, i, null, r)) {
                        binCount = 1;
                        Node<K,V> node = null;
                        try {
                            if ((val = mappingFunction.apply(key)) != null)
                                node = new Node<K,V>(h, key, val, null);
                        } finally {
                            // Swap in the real node, or restore null if the
                            // function returned null or threw.
                            setTabAt(tab, i, node);
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);   // resize in progress: assist, then retry
            else {
                boolean added = false;
                synchronized (f) {            // lock the bin head
                    if (tabAt(tab, i) == f) { // recheck head; else retry outer loop
                        if (fh >= 0) {        // ordinary linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek; V ev;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = e.val;   // already present: no function call
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    // End of chain: compute and append if non-null.
                                    if ((val = mappingFunction.apply(key)) != null) {
                                        added = true;
                                        pred.next = new Node<K,V>(h, key, val, null);
                                    }
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 2;     // sentinel: avoid re-treeify, count the add
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(h, key, null)) != null)
                                val = p.val;
                            else if ((val = mappingFunction.apply(key)) != null) {
                                added = true;
                                t.putTreeVal(h, key, val);
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (!added)
                        return val;   // pre-existing mapping: size unchanged
                    break;
                }
            }
        }
        if (val != null)
            addCount(1L, binCount);   // record the insertion; may trigger resize
        return val;
    }
1727
    /**
     * If the value for the specified key is present, attempts to
     * compute a new mapping given the key and its current mapped
     * value. The entire method invocation is performed atomically.
     * Some attempted update operations on this map by other threads
     * may be blocked while computation is in progress, so the
     * computation should be short and simple, and must not attempt to
     * update any other mappings of this map.
     *
     * @param key key with which a value may be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V computeIfPresent(K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;      // net size change: -1 if the mapping is removed
        int binCount = 0;   // 0 means "bin changed under us, retry"
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
                break;      // no bin => key absent: nothing to do
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);   // assist resize, then retry
            else {
                synchronized (f) {            // lock the bin head
                    if (tabAt(tab, i) == f) { // recheck head; else retry outer loop
                        if (fh >= 0) {        // ordinary linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(key, e.val);
                                    if (val != null)
                                        e.val = val;          // update in place
                                    else {
                                        // Function returned null: unlink node.
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;    // key not in chain: no-op
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(h, key, null)) != null) {
                                val = remappingFunction.apply(key, p.val);
                                if (val != null)
                                    p.val = val;
                                else {
                                    delta = -1;
                                    // Shrink back to a list if the tree got small.
                                    if (t.removeTreeNode(p))
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
1817
    /**
     * Attempts to compute a mapping for the specified key and its
     * current mapped value (or {@code null} if there is no current
     * mapping). The entire method invocation is performed atomically.
     * Some attempted update operations on this map by other threads
     * may be blocked while computation is in progress, so the
     * computation should be short and simple, and must not attempt to
     * update any other mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V compute(K key,
                     BiFun<? super K, ? super V, ? extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;      // net size change: +1 insert, -1 remove, 0 replace/no-op
        int binCount = 0;   // 0 means "bin changed under us, retry"
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: reserve it so the function runs at most once
                // while other writers are excluded.
                Node<K,V> r = new ReservationNode<K,V>();
                synchronized (r) {
                    if (casTabAt(tab, i, null, r)) {
                        binCount = 1;
                        Node<K,V> node = null;
                        try {
                            if ((val = remappingFunction.apply(key, null)) != null) {
                                delta = 1;
                                node = new Node<K,V>(h, key, val, null);
                            }
                        } finally {
                            // Replace reservation with real node, or clear it.
                            setTabAt(tab, i, node);
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);   // assist resize, then retry
            else {
                synchronized (f) {            // lock the bin head
                    if (tabAt(tab, i) == f) { // recheck head; else retry outer loop
                        if (fh >= 0) {        // ordinary linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(key, e.val);
                                    if (val != null)
                                        e.val = val;          // replace in place
                                    else {
                                        // Null result removes the mapping.
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    // Key absent: compute with null oldValue,
                                    // append a node on non-null result.
                                    val = remappingFunction.apply(key, null);
                                    if (val != null) {
                                        delta = 1;
                                        pred.next =
                                            new Node<K,V>(h, key, val, null);
                                    }
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 1;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null)
                                p = r.findTreeNode(h, key, null);
                            else
                                p = null;
                            V pv = (p == null) ? null : p.val;
                            val = remappingFunction.apply(key, pv);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                // Shrink back to a list if the tree got small.
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
1943
    /**
     * If the specified key is not already associated with a
     * (non-null) value, associates it with the given value.
     * Otherwise, replaces the value with the results of the given
     * remapping function, or removes if {@code null}. The entire
     * method invocation is performed atomically. Some attempted
     * update operations on this map by other threads may be blocked
     * while computation is in progress, so the computation should be
     * short and simple, and must not attempt to update any other
     * mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param value the value to use if absent
     * @param remappingFunction the function to recompute a value if present
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or the
     *         remappingFunction is null
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V merge(K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
        if (key == null || value == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;      // net size change: +1 insert, -1 remove, 0 replace
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: no current value, so the function is not called;
                // just CAS the new node in (no reservation needed here).
                if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) {
                    delta = 1;
                    val = value;
                    break;
                }
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);   // assist resize, then retry
            else {
                synchronized (f) {            // lock the bin head
                    if (tabAt(tab, i) == f) { // recheck head; else retry outer loop
                        if (fh >= 0) {        // ordinary linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    // Existing mapping: merge old and new values.
                                    val = remappingFunction.apply(e.val, value);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        // Null merge result removes the mapping.
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    // Key absent: install the given value as-is.
                                    delta = 1;
                                    val = value;
                                    pred.next =
                                        new Node<K,V>(h, key, val, null);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r = t.root;
                            TreeNode<K,V> p = (r == null) ? null :
                                r.findTreeNode(h, key, null);
                            val = (p == null) ? value :
                                remappingFunction.apply(p.val, value);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                // Shrink back to a list if the tree got small.
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
2052
2053 // Hashtable legacy methods
2054
    /**
     * Legacy method testing if some key maps into the specified value
     * in this table. This method is identical in functionality to
     * {@link #containsValue(Object)}, and exists solely to ensure
     * full compatibility with class {@link java.util.Hashtable},
     * which supported this method prior to introduction of the
     * Java Collections framework.
     *
     * @param value a value to search for
     * @return {@code true} if and only if some key maps to the
     *         {@code value} argument in this table as
     *         determined by the {@code equals} method;
     *         {@code false} otherwise
     * @throws NullPointerException if the specified value is null
     */
    @Deprecated public boolean contains(Object value) {
        // Pure delegation; containsValue performs the null check and the scan.
        return containsValue(value);
    }
2073
2074 /**
2075 * Returns an enumeration of the keys in this table.
2076 *
2077 * @return an enumeration of the keys in this table
2078 * @see #keySet()
2079 */
2080 public Enumeration<K> keys() {
2081 Node<K,V>[] t;
2082 int f = (t = table) == null ? 0 : t.length;
2083 return new KeyIterator<K,V>(t, f, 0, f, this);
2084 }
2085
2086 /**
2087 * Returns an enumeration of the values in this table.
2088 *
2089 * @return an enumeration of the values in this table
2090 * @see #values()
2091 */
2092 public Enumeration<V> elements() {
2093 Node<K,V>[] t;
2094 int f = (t = table) == null ? 0 : t.length;
2095 return new ValueIterator<K,V>(t, f, 0, f, this);
2096 }
2097
2098 // ConcurrentHashMapV8-only methods
2099
2100 /**
2101 * Returns the number of mappings. This method should be used
2102 * instead of {@link #size} because a ConcurrentHashMapV8 may
2103 * contain more mappings than can be represented as an int. The
2104 * value returned is an estimate; the actual count may differ if
2105 * there are concurrent insertions or removals.
2106 *
2107 * @return the number of mappings
2108 * @since 1.8
2109 */
2110 public long mappingCount() {
2111 long n = sumCount();
2112 return (n < 0L) ? 0L : n; // ignore transient negative values
2113 }
2114
2115 /**
2116 * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2117 * from the given type to {@code Boolean.TRUE}.
2118 *
2119 * @return the new set
2120 * @since 1.8
2121 */
2122 public static <K> KeySetView<K,Boolean> newKeySet() {
2123 return new KeySetView<K,Boolean>
2124 (new ConcurrentHashMapV8<K,Boolean>(), Boolean.TRUE);
2125 }
2126
2127 /**
2128 * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2129 * from the given type to {@code Boolean.TRUE}.
2130 *
2131 * @param initialCapacity The implementation performs internal
2132 * sizing to accommodate this many elements.
2133 * @return the new set
2134 * @throws IllegalArgumentException if the initial capacity of
2135 * elements is negative
2136 * @since 1.8
2137 */
2138 public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2139 return new KeySetView<K,Boolean>
2140 (new ConcurrentHashMapV8<K,Boolean>(initialCapacity), Boolean.TRUE);
2141 }
2142
2143 /**
2144 * Returns a {@link Set} view of the keys in this map, using the
2145 * given common mapped value for any additions (i.e., {@link
2146 * Collection#add} and {@link Collection#addAll(Collection)}).
2147 * This is of course only appropriate if it is acceptable to use
2148 * the same value for all additions from this view.
2149 *
2150 * @param mappedValue the mapped value to use for any additions
2151 * @return the set view
2152 * @throws NullPointerException if the mappedValue is null
2153 */
2154 public KeySetView<K,V> keySet(V mappedValue) {
2155 if (mappedValue == null)
2156 throw new NullPointerException();
2157 return new KeySetView<K,V>(this, mappedValue);
2158 }
2159
2160 /* ---------------- Special Nodes -------------- */
2161
    /**
     * A node inserted at head of bins during transfer operations.
     * Its MOVED hash marks the bin as already migrated; lookups that
     * reach it are redirected into {@code nextTable}.
     */
    static final class ForwardingNode<K,V> extends Node<K,V> {
        final Node<K,V>[] nextTable;   // the table being transferred into
        ForwardingNode(Node<K,V>[] tab) {
            super(MOVED, null, null, null);
            this.nextTable = tab;
        }

        // Looks up (h, k) in the next table, following further forwarding
        // nodes iteratively rather than recursively.
        Node<K,V> find(int h, Object k) {
            // loop to avoid arbitrarily deep recursion on forwarding nodes
            outer: for (Node<K,V>[] tab = nextTable;;) {
                Node<K,V> e; int n;
                if (k == null || tab == null || (n = tab.length) == 0 ||
                    (e = tabAt(tab, (n - 1) & h)) == null)
                    return null;
                for (;;) {
                    int eh; K ek;
                    if ((eh = e.hash) == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                    if (eh < 0) {   // special node: forwarder, tree bin, or reservation
                        if (e instanceof ForwardingNode) {
                            // Chained resize: restart against the newer table.
                            tab = ((ForwardingNode<K,V>)e).nextTable;
                            continue outer;
                        }
                        else
                            return e.find(h, k);   // e.g. TreeBin's own search
                    }
                    if ((e = e.next) == null)
                        return null;
                }
            }
        }
    }
2198
    /**
     * A place-holder node used in computeIfAbsent and compute.
     * Holds a bin's slot (with the negative RESERVED hash) while a
     * user-supplied function runs, so competing writers block on the
     * node's monitor instead of operating on a half-built bin.
     */
    static final class ReservationNode<K,V> extends Node<K,V> {
        ReservationNode() {
            super(RESERVED, null, null, null);
        }

        // A reservation never holds a mapping, so searches find nothing.
        Node<K,V> find(int h, Object k) {
            return null;
        }
    }
2211
2212 /* ---------------- Table Initialization and Resizing -------------- */
2213
    /**
     * Returns the stamp bits for resizing a table of size n.
     * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
     * The leading-zero count uniquely identifies the table size (a
     * power of two); OR-ing in bit (RESIZE_STAMP_BITS - 1) guarantees
     * the shifted stamp has its sign bit set.
     */
    static final int resizeStamp(int n) {
        return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
    }
2221
    /**
     * Initializes table, using the size recorded in sizeCtl.
     * Exactly one thread wins the CAS of sizeCtl to -1 and allocates;
     * losers spin (yielding) until the table is published.
     */
    private final Node<K,V>[] initTable() {
        Node<K,V>[] tab; int sc;
        while ((tab = table) == null || tab.length == 0) {
            if ((sc = sizeCtl) < 0)
                Thread.yield(); // lost initialization race; just spin
            else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                try {
                    // Recheck under "lock": another thread may have initialized
                    // between our read and the CAS.
                    if ((tab = table) == null || tab.length == 0) {
                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;  // sc>0 carries requested capacity
                        @SuppressWarnings("unchecked")
                        Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                        table = tab = nt;
                        sc = n - (n >>> 2);   // next resize threshold = 0.75 * n
                    }
                } finally {
                    sizeCtl = sc;   // always restore, even if allocation threw
                }
                break;
            }
        }
        return tab;
    }
2247
    /**
     * Adds to count, and if table is too small and not already
     * resizing, initiates transfer. If already resizing, helps
     * perform transfer if work is available. Rechecks occupancy
     * after a transfer to see if another resize is already needed
     * because resizings are lagging additions.
     *
     * @param x the count to add
     * @param check if <0, don't check resize, if <= 1 only check if uncontended
     */
    private final void addCount(long x, int check) {
        CounterCell[] as; long b, s;
        // Fast path: CAS the base count. On contention (or once cells
        // exist), spread updates across striped counter cells.
        if ((as = counterCells) != null ||
            !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
            CounterHashCode hc; CounterCell a; long v; int m;
            boolean uncontended = true;
            if ((hc = threadCounterHashCode.get()) == null ||
                as == null || (m = as.length - 1) < 0 ||
                (a = as[m & hc.code]) == null ||
                !(uncontended =
                  U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
                // Slow path: create/expand cells or rehash this thread's probe.
                fullAddCount(x, hc, uncontended);
                return;
            }
            if (check <= 1)
                return;         // caller asked to skip resize check under contention
            s = sumCount();
        }
        if (check >= 0) {
            Node<K,V>[] tab, nt; int n, sc;
            // Occupancy s reached threshold sc: start or join a transfer.
            while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
                   (n = tab.length) < MAXIMUM_CAPACITY) {
                int rs = resizeStamp(n);
                if (sc < 0) {   // a resize is in progress; try to help
                    // NOTE(review): "sc == rs + 1" / "sc == rs + MAX_RESIZERS"
                    // compare sizeCtl against an UNshifted stamp; upstream JDK
                    // later rewrote these checks (JDK-8214427). Confirm against
                    // current jsr166 sources before changing this hot path.
                    if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                        sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                        transferIndex <= 0)
                        break;
                    if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
                        transfer(tab, nt);   // registered as one more helper
                }
                else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                             (rs << RESIZE_STAMP_SHIFT) + 2))
                    transfer(tab, null);     // we are the initiating thread
                s = sumCount();              // recheck: resizes may lag additions
            }
        }
    }
2296
    /**
     * Helps transfer if a resize is in progress.
     * Called when an update lands on a ForwardingNode; registers this
     * thread as a resizer (sizeCtl + 1) and processes a stride of bins.
     *
     * @return the next table if a transfer was joined, else the current table
     */
    final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
        Node<K,V>[] nextTab; int sc;
        if (tab != null && (f instanceof ForwardingNode) &&
            (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
            int rs = resizeStamp(tab.length);
            // Only help while THIS resize generation is still active.
            while (nextTab == nextTable && table == tab &&
                   (sc = sizeCtl) < 0) {
                // NOTE(review): same unshifted-stamp comparison as in addCount
                // (see JDK-8214427); kept as-is pending upstream confirmation.
                if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                    sc == rs + MAX_RESIZERS || transferIndex <= 0)
                    break;      // finished, saturated, or no work left to claim
                if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
                    transfer(tab, nextTab);
                    break;
                }
            }
            return nextTab;
        }
        return table;
    }
2319
    /**
     * Tries to presize table to accommodate the given number of elements.
     * Sizing targets 1.5*size + 1 rounded up to a power of two, so that
     * the 0.75 load factor is not exceeded for the requested count.
     *
     * @param size number of elements (doesn't need to be perfectly accurate)
     */
    private final void tryPresize(int size) {
        int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
            tableSizeFor(size + (size >>> 1) + 1);
        int sc;
        while ((sc = sizeCtl) >= 0) {   // give up once a resize/init is underway
            Node<K,V>[] tab = table; int n;
            if (tab == null || (n = tab.length) == 0) {
                // Not yet initialized: allocate directly at the target size
                // (same CAS-to--1 protocol as initTable).
                n = (sc > c) ? sc : c;
                if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                    try {
                        if (table == tab) {
                            @SuppressWarnings("unchecked")
                            Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                            table = nt;
                            sc = n - (n >>> 2);   // threshold = 0.75 * n
                        }
                    } finally {
                        sizeCtl = sc;
                    }
                }
            }
            else if (c <= sc || n >= MAXIMUM_CAPACITY)
                break;   // already big enough, or cannot grow further
            else if (tab == table) {
                int rs = resizeStamp(n);
                if (sc < 0) {
                    // NOTE(review): unreachable given the loop condition
                    // (sc >= 0); upstream JDK later removed this branch.
                    // Left intact to avoid touching validated concurrency code.
                    Node<K,V>[] nt;
                    if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                        sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                        transferIndex <= 0)
                        break;
                    if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
                        transfer(tab, nt);
                }
                else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                             (rs << RESIZE_STAMP_SHIFT) + 2))
                    transfer(tab, null);   // initiate the resize ourselves
            }
        }
    }
2365
    /**
     * Moves and/or copies the nodes in each bin to new table. See
     * above for explanation.
     * Work is claimed in strides of bins (via CAS on transferIndex) so
     * multiple threads can cooperate; each migrated bin is replaced by
     * a ForwardingNode so readers are redirected to the new table.
     */
    private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
        int n = tab.length, stride;
        if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
            stride = MIN_TRANSFER_STRIDE; // subdivide range
        if (nextTab == null) {            // initiating
            try {
                @SuppressWarnings("unchecked")
                Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];   // double the size
                nextTab = nt;
            } catch (Throwable ex) {      // try to cope with OOME
                sizeCtl = Integer.MAX_VALUE;   // disable further resizing
                return;
            }
            nextTable = nextTab;
            transferIndex = n;            // bins are claimed from high index downward
        }
        int nextn = nextTab.length;
        ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
        boolean advance = true;
        boolean finishing = false; // to ensure sweep before committing nextTab
        for (int i = 0, bound = 0;;) {
            Node<K,V> f; int fh;
            // Claim the next bin: either step down within our stride, or
            // CAS transferIndex to reserve a fresh stride of bins.
            while (advance) {
                int nextIndex, nextBound;
                if (--i >= bound || finishing)
                    advance = false;
                else if ((nextIndex = transferIndex) <= 0) {
                    i = -1;               // no work left to claim
                    advance = false;
                }
                else if (U.compareAndSwapInt
                         (this, TRANSFERINDEX, nextIndex,
                          nextBound = (nextIndex > stride ?
                                       nextIndex - stride : 0))) {
                    bound = nextBound;
                    i = nextIndex - 1;
                    advance = false;
                }
            }
            if (i < 0 || i >= n || i + n >= nextn) {
                // Done with our share: deregister; the last thread out
                // re-sweeps and then commits the new table.
                int sc;
                if (finishing) {
                    nextTable = null;
                    table = nextTab;
                    sizeCtl = (n << 1) - (n >>> 1);   // threshold = 0.75 * 2n
                    return;
                }
                if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
                    if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
                        return;           // other resizers remain; we're done
                    finishing = advance = true;
                    i = n; // recheck before commit
                }
            }
            else if ((f = tabAt(tab, i)) == null)
                advance = casTabAt(tab, i, null, fwd);   // empty bin: just forward
            else if ((fh = f.hash) == MOVED)
                advance = true; // already processed
            else {
                synchronized (f) {                // lock the bin during the split
                    if (tabAt(tab, i) == f) {
                        Node<K,V> ln, hn;         // low (index i) and high (i + n) lists
                        if (fh >= 0) {            // linked-list bin
                            // Split on bit n of each hash. lastRun marks the
                            // tail segment whose nodes all land in one list,
                            // so that suffix can be reused without copying.
                            int runBit = fh & n;
                            Node<K,V> lastRun = f;
                            for (Node<K,V> p = f.next; p != null; p = p.next) {
                                int b = p.hash & n;
                                if (b != runBit) {
                                    runBit = b;
                                    lastRun = p;
                                }
                            }
                            if (runBit == 0) {
                                ln = lastRun;
                                hn = null;
                            }
                            else {
                                hn = lastRun;
                                ln = null;
                            }
                            // Clone the prefix (in reverse) onto the right list.
                            for (Node<K,V> p = f; p != lastRun; p = p.next) {
                                int ph = p.hash; K pk = p.key; V pv = p.val;
                                if ((ph & n) == 0)
                                    ln = new Node<K,V>(ph, pk, pv, ln);
                                else
                                    hn = new Node<K,V>(ph, pk, pv, hn);
                            }
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);   // publish forwarding last
                            advance = true;
                        }
                        else if (f instanceof TreeBin) {
                            // Split the tree's node list into low/high lists,
                            // keeping insertion order via prev/next links.
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> lo = null, loTail = null;
                            TreeNode<K,V> hi = null, hiTail = null;
                            int lc = 0, hc = 0;
                            for (Node<K,V> e = t.first; e != null; e = e.next) {
                                int h = e.hash;
                                TreeNode<K,V> p = new TreeNode<K,V>
                                    (h, e.key, e.val, null, null);
                                if ((h & n) == 0) {
                                    if ((p.prev = loTail) == null)
                                        lo = p;
                                    else
                                        loTail.next = p;
                                    loTail = p;
                                    ++lc;
                                }
                                else {
                                    if ((p.prev = hiTail) == null)
                                        hi = p;
                                    else
                                        hiTail.next = p;
                                    hiTail = p;
                                    ++hc;
                                }
                            }
                            // Untreeify small halves; reuse the old bin when
                            // the other half is empty (count == 0).
                            ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                                (hc != 0) ? new TreeBin<K,V>(lo) : t;
                            hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                                (lc != 0) ? new TreeBin<K,V>(hi) : t;
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);   // publish forwarding last
                            advance = true;
                        }
                    }
                }
            }
        }
    }
2502
2503 /* ---------------- Conversion from/to TreeBins -------------- */
2504
    /**
     * Replaces all linked nodes in bin at given index unless table is
     * too small, in which case resizes instead.
     * (Below MIN_TREEIFY_CAPACITY, growing the table is preferred over
     * treeifying, since resizing alone shortens the chains.)
     */
    private final void treeifyBin(Node<K,V>[] tab, int index) {
        Node<K,V> b; int n, sc;
        if (tab != null) {
            if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
                tryPresize(n << 1);
            else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {   // plain list head
                synchronized (b) {              // lock the bin for the conversion
                    if (tabAt(tab, index) == b) {   // recheck head under lock
                        // Copy the chain into doubly-linked TreeNodes; the
                        // TreeBin constructor builds the red-black tree.
                        TreeNode<K,V> hd = null, tl = null;
                        for (Node<K,V> e = b; e != null; e = e.next) {
                            TreeNode<K,V> p =
                                new TreeNode<K,V>(e.hash, e.key, e.val,
                                                  null, null);
                            if ((p.prev = tl) == null)
                                hd = p;
                            else
                                tl.next = p;
                            tl = p;
                        }
                        setTabAt(tab, index, new TreeBin<K,V>(hd));
                    }
                }
            }
        }
    }
2534
2535 /**
2536 * Returns a list of non-TreeNodes replacing those in given list.
2537 */
2538 static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2539 Node<K,V> hd = null, tl = null;
2540 for (Node<K,V> q = b; q != null; q = q.next) {
2541 Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
2542 if (tl == null)
2543 hd = p;
2544 else
2545 tl.next = p;
2546 tl = p;
2547 }
2548 return hd;
2549 }
2550
2551 /* ---------------- TreeNodes -------------- */
2552
    /**
     * Nodes for use in TreeBins.
     * Extends Node with red-black tree links plus a prev pointer so a
     * node can be unlinked from the traversal list on deletion.
     */
    static final class TreeNode<K,V> extends Node<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;

        TreeNode(int hash, K key, V val, Node<K,V> next,
                 TreeNode<K,V> parent) {
            super(hash, key, val, next);
            this.parent = parent;
        }

        Node<K,V> find(int h, Object k) {
            return findTreeNode(h, k, null);
        }

        /**
         * Returns the TreeNode (or null if not found) for the given key
         * starting at given root.
         * Navigates by hash first; on hash ties it tries equals, then a
         * Comparable comparison (kc caches the comparable class), and as
         * a last resort searches the right subtree recursively before
         * descending left.
         */
        final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
            if (k != null) {
                TreeNode<K,V> p = this;
                do {
                    int ph, dir; K pk; TreeNode<K,V> q;
                    TreeNode<K,V> pl = p.left, pr = p.right;
                    if ((ph = p.hash) > h)
                        p = pl;
                    else if (ph < h)
                        p = pr;
                    else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                        return p;          // exact match on a hash tie
                    else if (pl == null)
                        p = pr;            // only one subtree to try
                    else if (pr == null)
                        p = pl;
                    else if ((kc != null ||
                              (kc = comparableClassFor(k)) != null) &&
                             (dir = compareComparables(kc, k, pk)) != 0)
                        p = (dir < 0) ? pl : pr;   // ordered by Comparable
                    else if ((q = pr.findTreeNode(h, k, kc)) != null)
                        return q;          // non-comparable tie: check right subtree
                    else
                        p = pl;            // then continue down the left
                } while (p != null);
            }
            return null;
        }
    }
2606
2607 /* ---------------- TreeBins -------------- */
2608
2609 /**
2610 * TreeNodes used at the heads of bins. TreeBins do not hold user
2611 * keys or values, but instead point to list of TreeNodes and
2612 * their root. They also maintain a parasitic read-write lock
2613 * forcing writers (who hold bin lock) to wait for readers (who do
2614 * not) to complete before tree restructuring operations.
2615 */
2616 static final class TreeBin<K,V> extends Node<K,V> {
2617 TreeNode<K,V> root;
2618 volatile TreeNode<K,V> first;
2619 volatile Thread waiter;
2620 volatile int lockState;
2621 // values for lockState
2622 static final int WRITER = 1; // set while holding write lock
2623 static final int WAITER = 2; // set when waiting for write lock
2624 static final int READER = 4; // increment value for setting read lock
2625
2626 /**
2627 * Tie-breaking utility for ordering insertions when equal
2628 * hashCodes and non-comparable. We don't require a total
2629 * order, just a consistent insertion rule to maintain
2630 * equivalence across rebalancings. Tie-breaking further than
2631 * necessary simplifies testing a bit.
2632 */
2633 static int tieBreakOrder(Object a, Object b) {
2634 int d;
2635 if (a == null || b == null ||
2636 (d = a.getClass().getName().
2637 compareTo(b.getClass().getName())) == 0)
2638 d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
2639 -1 : 1);
2640 return d;
2641 }
2642
        /**
         * Creates bin with initial set of nodes headed by b.
         * Builds the red-black tree by inserting each node of the
         * linked list in order, using hash, then Comparable, then
         * tieBreakOrder to pick a branch direction.
         */
        TreeBin(TreeNode<K,V> b) {
            super(TREEBIN, null, null, null);   // TREEBIN hash marks special node
            this.first = b;
            TreeNode<K,V> r = null;             // current tree root
            for (TreeNode<K,V> x = b, next; x != null; x = next) {
                next = (TreeNode<K,V>)x.next;
                x.left = x.right = null;        // clear any stale tree links
                if (r == null) {
                    // First node becomes the (black) root.
                    x.parent = null;
                    x.red = false;
                    r = x;
                }
                else {
                    K k = x.key;
                    int h = x.hash;
                    Class<?> kc = null;         // lazily resolved Comparable class
                    for (TreeNode<K,V> p = r;;) {
                        int dir, ph;
                        K pk = p.key;
                        if ((ph = p.hash) > h)
                            dir = -1;
                        else if (ph < h)
                            dir = 1;
                        else if ((kc == null &&
                                  (kc = comparableClassFor(k)) == null) ||
                                 (dir = compareComparables(kc, k, pk)) == 0)
                            dir = tieBreakOrder(k, pk);   // arbitrary but consistent
                        TreeNode<K,V> xp = p;
                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
                            // Found the leaf slot: link, then rebalance.
                            x.parent = xp;
                            if (dir <= 0)
                                xp.left = x;
                            else
                                xp.right = x;
                            r = balanceInsertion(r, x);
                            break;
                        }
                    }
                }
            }
            this.root = r;
            assert checkInvariants(root);   // debug-only red-black sanity check
        }
2689
2690 /**
2691 * Acquires write lock for tree restructuring.
2692 */
2693 private final void lockRoot() {
2694 if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
2695 contendedLock(); // offload to separate method
2696 }
2697
2698 /**
2699 * Releases write lock for tree restructuring.
2700 */
2701 private final void unlockRoot() {
2702 lockState = 0;
2703 }
2704
2705 /**
2706 * Possibly blocks awaiting root lock.
2707 */
2708 private final void contendedLock() {
2709 boolean waiting = false;
2710 for (int s;;) {
2711 if (((s = lockState) & ~WAITER) == 0) {
2712 if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
2713 if (waiting)
2714 waiter = null;
2715 return;
2716 }
2717 }
2718 else if ((s & WAITER) == 0) {
2719 if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
2720 waiting = true;
2721 waiter = Thread.currentThread();
2722 }
2723 }
2724 else if (waiting)
2725 LockSupport.park(this);
2726 }
2727 }
2728
2729 /**
2730 * Returns matching node or null if none. Tries to search
2731 * using tree comparisons from root, but continues linear
2732 * search when lock not available.
2733 */
2734 final Node<K,V> find(int h, Object k) {
2735 if (k != null) {
2736 for (Node<K,V> e = first; e != null; ) {
2737 int s; K ek;
2738 if (((s = lockState) & (WAITER|WRITER)) != 0) {
2739 if (e.hash == h &&
2740 ((ek = e.key) == k || (ek != null && k.equals(ek))))
2741 return e;
2742 e = e.next;
2743 }
2744 else if (U.compareAndSwapInt(this, LOCKSTATE, s,
2745 s + READER)) {
2746 TreeNode<K,V> r, p;
2747 try {
2748 p = ((r = root) == null ? null :
2749 r.findTreeNode(h, k, null));
2750 } finally {
2751 Thread w;
2752 int ls;
2753 do {} while (!U.compareAndSwapInt
2754 (this, LOCKSTATE,
2755 ls = lockState, ls - READER));
2756 if (ls == (READER|WAITER) && (w = waiter) != null)
2757 LockSupport.unpark(w);
2758 }
2759 return p;
2760 }
2761 }
2762 }
2763 return null;
2764 }
2765
2766 /**
2767 * Finds or adds a node.
2768 * @return null if added
2769 */
2770 final TreeNode<K,V> putTreeVal(int h, K k, V v) {
2771 Class<?> kc = null;
2772 boolean searched = false;
2773 for (TreeNode<K,V> p = root;;) {
2774 int dir, ph; K pk;
2775 if (p == null) {
2776 first = root = new TreeNode<K,V>(h, k, v, null, null);
2777 break;
2778 }
2779 else if ((ph = p.hash) > h)
2780 dir = -1;
2781 else if (ph < h)
2782 dir = 1;
2783 else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2784 return p;
2785 else if ((kc == null &&
2786 (kc = comparableClassFor(k)) == null) ||
2787 (dir = compareComparables(kc, k, pk)) == 0) {
2788 if (!searched) {
2789 TreeNode<K,V> q, ch;
2790 searched = true;
2791 if (((ch = p.left) != null &&
2792 (q = ch.findTreeNode(h, k, kc)) != null) ||
2793 ((ch = p.right) != null &&
2794 (q = ch.findTreeNode(h, k, kc)) != null))
2795 return q;
2796 }
2797 dir = tieBreakOrder(k, pk);
2798 }
2799
2800 TreeNode<K,V> xp = p;
2801 if ((p = (dir <= 0) ? p.left : p.right) == null) {
2802 TreeNode<K,V> x, f = first;
2803 first = x = new TreeNode<K,V>(h, k, v, f, xp);
2804 if (f != null)
2805 f.prev = x;
2806 if (dir <= 0)
2807 xp.left = x;
2808 else
2809 xp.right = x;
2810 if (!xp.red)
2811 x.red = true;
2812 else {
2813 lockRoot();
2814 try {
2815 root = balanceInsertion(root, x);
2816 } finally {
2817 unlockRoot();
2818 }
2819 }
2820 break;
2821 }
2822 }
2823 assert checkInvariants(root);
2824 return null;
2825 }
2826
2827 /**
2828 * Removes the given node, that must be present before this
2829 * call. This is messier than typical red-black deletion code
2830 * because we cannot swap the contents of an interior node
2831 * with a leaf successor that is pinned by "next" pointers
2832 * that are accessible independently of lock. So instead we
2833 * swap the tree linkages.
2834 *
2835 * @return true if now too small, so should be untreeified
2836 */
2837 final boolean removeTreeNode(TreeNode<K,V> p) {
2838 TreeNode<K,V> next = (TreeNode<K,V>)p.next;
2839 TreeNode<K,V> pred = p.prev; // unlink traversal pointers
2840 TreeNode<K,V> r, rl;
2841 if (pred == null)
2842 first = next;
2843 else
2844 pred.next = next;
2845 if (next != null)
2846 next.prev = pred;
2847 if (first == null) {
2848 root = null;
2849 return true;
2850 }
2851 if ((r = root) == null || r.right == null || // too small
2852 (rl = r.left) == null || rl.left == null)
2853 return true;
2854 lockRoot();
2855 try {
2856 TreeNode<K,V> replacement;
2857 TreeNode<K,V> pl = p.left;
2858 TreeNode<K,V> pr = p.right;
2859 if (pl != null && pr != null) {
2860 TreeNode<K,V> s = pr, sl;
2861 while ((sl = s.left) != null) // find successor
2862 s = sl;
2863 boolean c = s.red; s.red = p.red; p.red = c; // swap colors
2864 TreeNode<K,V> sr = s.right;
2865 TreeNode<K,V> pp = p.parent;
2866 if (s == pr) { // p was s's direct parent
2867 p.parent = s;
2868 s.right = p;
2869 }
2870 else {
2871 TreeNode<K,V> sp = s.parent;
2872 if ((p.parent = sp) != null) {
2873 if (s == sp.left)
2874 sp.left = p;
2875 else
2876 sp.right = p;
2877 }
2878 if ((s.right = pr) != null)
2879 pr.parent = s;
2880 }
2881 p.left = null;
2882 if ((p.right = sr) != null)
2883 sr.parent = p;
2884 if ((s.left = pl) != null)
2885 pl.parent = s;
2886 if ((s.parent = pp) == null)
2887 r = s;
2888 else if (p == pp.left)
2889 pp.left = s;
2890 else
2891 pp.right = s;
2892 if (sr != null)
2893 replacement = sr;
2894 else
2895 replacement = p;
2896 }
2897 else if (pl != null)
2898 replacement = pl;
2899 else if (pr != null)
2900 replacement = pr;
2901 else
2902 replacement = p;
2903 if (replacement != p) {
2904 TreeNode<K,V> pp = replacement.parent = p.parent;
2905 if (pp == null)
2906 r = replacement;
2907 else if (p == pp.left)
2908 pp.left = replacement;
2909 else
2910 pp.right = replacement;
2911 p.left = p.right = p.parent = null;
2912 }
2913
2914 root = (p.red) ? r : balanceDeletion(r, replacement);
2915
2916 if (p == replacement) { // detach pointers
2917 TreeNode<K,V> pp;
2918 if ((pp = p.parent) != null) {
2919 if (p == pp.left)
2920 pp.left = null;
2921 else if (p == pp.right)
2922 pp.right = null;
2923 p.parent = null;
2924 }
2925 }
2926 } finally {
2927 unlockRoot();
2928 }
2929 assert checkInvariants(root);
2930 return false;
2931 }
2932
2933 /* ------------------------------------------------------------ */
2934 // Red-black tree methods, all adapted from CLR
2935
2936 static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
2937 TreeNode<K,V> p) {
2938 TreeNode<K,V> r, pp, rl;
2939 if (p != null && (r = p.right) != null) {
2940 if ((rl = p.right = r.left) != null)
2941 rl.parent = p;
2942 if ((pp = r.parent = p.parent) == null)
2943 (root = r).red = false;
2944 else if (pp.left == p)
2945 pp.left = r;
2946 else
2947 pp.right = r;
2948 r.left = p;
2949 p.parent = r;
2950 }
2951 return root;
2952 }
2953
2954 static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
2955 TreeNode<K,V> p) {
2956 TreeNode<K,V> l, pp, lr;
2957 if (p != null && (l = p.left) != null) {
2958 if ((lr = p.left = l.right) != null)
2959 lr.parent = p;
2960 if ((pp = l.parent = p.parent) == null)
2961 (root = l).red = false;
2962 else if (pp.right == p)
2963 pp.right = l;
2964 else
2965 pp.left = l;
2966 l.right = p;
2967 p.parent = l;
2968 }
2969 return root;
2970 }
2971
2972 static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
2973 TreeNode<K,V> x) {
2974 x.red = true;
2975 for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
2976 if ((xp = x.parent) == null) {
2977 x.red = false;
2978 return x;
2979 }
2980 else if (!xp.red || (xpp = xp.parent) == null)
2981 return root;
2982 if (xp == (xppl = xpp.left)) {
2983 if ((xppr = xpp.right) != null && xppr.red) {
2984 xppr.red = false;
2985 xp.red = false;
2986 xpp.red = true;
2987 x = xpp;
2988 }
2989 else {
2990 if (x == xp.right) {
2991 root = rotateLeft(root, x = xp);
2992 xpp = (xp = x.parent) == null ? null : xp.parent;
2993 }
2994 if (xp != null) {
2995 xp.red = false;
2996 if (xpp != null) {
2997 xpp.red = true;
2998 root = rotateRight(root, xpp);
2999 }
3000 }
3001 }
3002 }
3003 else {
3004 if (xppl != null && xppl.red) {
3005 xppl.red = false;
3006 xp.red = false;
3007 xpp.red = true;
3008 x = xpp;
3009 }
3010 else {
3011 if (x == xp.left) {
3012 root = rotateRight(root, x = xp);
3013 xpp = (xp = x.parent) == null ? null : xp.parent;
3014 }
3015 if (xp != null) {
3016 xp.red = false;
3017 if (xpp != null) {
3018 xpp.red = true;
3019 root = rotateLeft(root, xpp);
3020 }
3021 }
3022 }
3023 }
3024 }
3025 }
3026
3027 static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
3028 TreeNode<K,V> x) {
3029 for (TreeNode<K,V> xp, xpl, xpr;;) {
3030 if (x == null || x == root)
3031 return root;
3032 else if ((xp = x.parent) == null) {
3033 x.red = false;
3034 return x;
3035 }
3036 else if (x.red) {
3037 x.red = false;
3038 return root;
3039 }
3040 else if ((xpl = xp.left) == x) {
3041 if ((xpr = xp.right) != null && xpr.red) {
3042 xpr.red = false;
3043 xp.red = true;
3044 root = rotateLeft(root, xp);
3045 xpr = (xp = x.parent) == null ? null : xp.right;
3046 }
3047 if (xpr == null)
3048 x = xp;
3049 else {
3050 TreeNode<K,V> sl = xpr.left, sr = xpr.right;
3051 if ((sr == null || !sr.red) &&
3052 (sl == null || !sl.red)) {
3053 xpr.red = true;
3054 x = xp;
3055 }
3056 else {
3057 if (sr == null || !sr.red) {
3058 if (sl != null)
3059 sl.red = false;
3060 xpr.red = true;
3061 root = rotateRight(root, xpr);
3062 xpr = (xp = x.parent) == null ?
3063 null : xp.right;
3064 }
3065 if (xpr != null) {
3066 xpr.red = (xp == null) ? false : xp.red;
3067 if ((sr = xpr.right) != null)
3068 sr.red = false;
3069 }
3070 if (xp != null) {
3071 xp.red = false;
3072 root = rotateLeft(root, xp);
3073 }
3074 x = root;
3075 }
3076 }
3077 }
3078 else { // symmetric
3079 if (xpl != null && xpl.red) {
3080 xpl.red = false;
3081 xp.red = true;
3082 root = rotateRight(root, xp);
3083 xpl = (xp = x.parent) == null ? null : xp.left;
3084 }
3085 if (xpl == null)
3086 x = xp;
3087 else {
3088 TreeNode<K,V> sl = xpl.left, sr = xpl.right;
3089 if ((sl == null || !sl.red) &&
3090 (sr == null || !sr.red)) {
3091 xpl.red = true;
3092 x = xp;
3093 }
3094 else {
3095 if (sl == null || !sl.red) {
3096 if (sr != null)
3097 sr.red = false;
3098 xpl.red = true;
3099 root = rotateLeft(root, xpl);
3100 xpl = (xp = x.parent) == null ?
3101 null : xp.left;
3102 }
3103 if (xpl != null) {
3104 xpl.red = (xp == null) ? false : xp.red;
3105 if ((sl = xpl.left) != null)
3106 sl.red = false;
3107 }
3108 if (xp != null) {
3109 xp.red = false;
3110 root = rotateRight(root, xp);
3111 }
3112 x = root;
3113 }
3114 }
3115 }
3116 }
3117 }
3118
3119 /**
3120 * Recursive invariant check
3121 */
3122 static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
3123 TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
3124 tb = t.prev, tn = (TreeNode<K,V>)t.next;
3125 if (tb != null && tb.next != t)
3126 return false;
3127 if (tn != null && tn.prev != t)
3128 return false;
3129 if (tp != null && t != tp.left && t != tp.right)
3130 return false;
3131 if (tl != null && (tl.parent != t || tl.hash > t.hash))
3132 return false;
3133 if (tr != null && (tr.parent != t || tr.hash < t.hash))
3134 return false;
3135 if (t.red && tl != null && tl.red && tr != null && tr.red)
3136 return false;
3137 if (tl != null && !checkInvariants(tl))
3138 return false;
3139 if (tr != null && !checkInvariants(tr))
3140 return false;
3141 return true;
3142 }
3143
3144 private static final sun.misc.Unsafe U;
3145 private static final long LOCKSTATE;
3146 static {
3147 try {
3148 U = getUnsafe();
3149 Class<?> k = TreeBin.class;
3150 LOCKSTATE = U.objectFieldOffset
3151 (k.getDeclaredField("lockState"));
3152 } catch (Exception e) {
3153 throw new Error(e);
3154 }
3155 }
3156 }
3157
3158 /* ----------------Table Traversal -------------- */
3159
3160 /**
3161 * Records the table, its length, and current traversal index for a
3162 * traverser that must process a region of a forwarded table before
3163 * proceeding with current table.
3164 */
    static final class TableStack<K,V> {
        int length;           // length of the saved table
        int index;            // saved traversal index into that table
        Node<K,V>[] tab;      // the table whose traversal is suspended
        TableStack<K,V> next; // link to the next saved frame (stack discipline)
    }
3171
3172 /**
3173 * Encapsulates traversal for methods such as containsValue; also
3174 * serves as a base class for other iterators and spliterators.
3175 *
3176 * Method advance visits once each still-valid node that was
3177 * reachable upon iterator construction. It might miss some that
3178 * were added to a bin after the bin was visited, which is OK wrt
3179 * consistency guarantees. Maintaining this property in the face
3180 * of possible ongoing resizes requires a fair amount of
3181 * bookkeeping state that is difficult to optimize away amidst
3182 * volatile accesses. Even so, traversal maintains reasonable
3183 * throughput.
3184 *
3185 * Normally, iteration proceeds bin-by-bin traversing lists.
3186 * However, if the table has been resized, then all future steps
3187 * must traverse both the bin at the current index as well as at
3188 * (index + baseSize); and so on for further resizings. To
3189 * paranoically cope with potential sharing by users of iterators
3190 * across threads, iteration terminates if a bounds checks fails
3191 * for a table read.
3192 */
    static class Traverser<K,V> {
        Node<K,V>[] tab;        // current table; updated if resized
        Node<K,V> next;         // the next entry to use
        TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
        int index;              // index of bin to use next
        int baseIndex;          // current index of initial table
        int baseLimit;          // index bound for initial table
        final int baseSize;     // initial table size

        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
            this.tab = tab;
            this.baseSize = size;
            this.baseIndex = this.index = index;
            this.baseLimit = limit;
            this.next = null;
        }

        /**
         * Advances if possible, returning next valid node, or null if none.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next; // continue within the current bin's list
            for (;;) {
                Node<K,V>[] t; int i, n; // must use locals in checks
                if (e != null)
                    return next = e;
                // done, or table/index out of range (possible under misuse)
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                if ((e = tabAt(t, i)) != null && e.hash < 0) {
                    // negative hash marks a special node
                    if (e instanceof ForwardingNode) {
                        // bin moved during resize: descend into next table,
                        // remembering where to resume in this one
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        pushState(t, i, n);
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first; // traverse tree bin via its list form
                    else
                        e = null; // other special node (placeholder) -- nothing to visit
                }
                if (stack != null)
                    recoverState(n);
                else if ((index = i + baseSize) >= n)
                    index = ++baseIndex; // visit upper slots if present
            }
        }

        /**
         * Saves traversal state upon encountering a forwarding node.
         */
        private void pushState(Node<K,V>[] t, int i, int n) {
            TableStack<K,V> s = spare; // reuse if possible
            if (s != null)
                spare = s.next;
            else
                s = new TableStack<K,V>();
            s.tab = t;
            s.length = n;
            s.index = i;
            s.next = stack;
            stack = s;
        }

        /**
         * Possibly pops traversal state.
         *
         * @param n length of current table
         */
        private void recoverState(int n) {
            TableStack<K,V> s; int len;
            // pop frames whose split range is exhausted at the current level
            while ((s = stack) != null && (index += (len = s.length)) >= n) {
                n = len;
                index = s.index;
                tab = s.tab;
                s.tab = null;
                TableStack<K,V> next = s.next;
                s.next = spare; // save for reuse
                stack = next;
                spare = s;
            }
            // with no saved state, step to the upper slot or the next base bin
            if (s == null && (index += baseSize) >= n)
                index = ++baseIndex;
        }
    }
3280
3281 /**
3282 * Base of key, value, and entry Iterators. Adds fields to
3283 * Traverser to support iterator.remove.
3284 */
3285 static class BaseIterator<K,V> extends Traverser<K,V> {
3286 final ConcurrentHashMapV8<K,V> map;
3287 Node<K,V> lastReturned;
3288 BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
3289 ConcurrentHashMapV8<K,V> map) {
3290 super(tab, size, index, limit);
3291 this.map = map;
3292 advance();
3293 }
3294
3295 public final boolean hasNext() { return next != null; }
3296 public final boolean hasMoreElements() { return next != null; }
3297
3298 public final void remove() {
3299 Node<K,V> p;
3300 if ((p = lastReturned) == null)
3301 throw new IllegalStateException();
3302 lastReturned = null;
3303 map.replaceNode(p.key, null, null);
3304 }
3305 }
3306
3307 static final class KeyIterator<K,V> extends BaseIterator<K,V>
3308 implements Iterator<K>, Enumeration<K> {
3309 KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
3310 ConcurrentHashMapV8<K,V> map) {
3311 super(tab, index, size, limit, map);
3312 }
3313
3314 public final K next() {
3315 Node<K,V> p;
3316 if ((p = next) == null)
3317 throw new NoSuchElementException();
3318 K k = p.key;
3319 lastReturned = p;
3320 advance();
3321 return k;
3322 }
3323
3324 public final K nextElement() { return next(); }
3325 }
3326
3327 static final class ValueIterator<K,V> extends BaseIterator<K,V>
3328 implements Iterator<V>, Enumeration<V> {
3329 ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
3330 ConcurrentHashMapV8<K,V> map) {
3331 super(tab, index, size, limit, map);
3332 }
3333
3334 public final V next() {
3335 Node<K,V> p;
3336 if ((p = next) == null)
3337 throw new NoSuchElementException();
3338 V v = p.val;
3339 lastReturned = p;
3340 advance();
3341 return v;
3342 }
3343
3344 public final V nextElement() { return next(); }
3345 }
3346
3347 static final class EntryIterator<K,V> extends BaseIterator<K,V>
3348 implements Iterator<Map.Entry<K,V>> {
3349 EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
3350 ConcurrentHashMapV8<K,V> map) {
3351 super(tab, index, size, limit, map);
3352 }
3353
3354 public final Map.Entry<K,V> next() {
3355 Node<K,V> p;
3356 if ((p = next) == null)
3357 throw new NoSuchElementException();
3358 K k = p.key;
3359 V v = p.val;
3360 lastReturned = p;
3361 advance();
3362 return new MapEntry<K,V>(k, v, map);
3363 }
3364 }
3365
3366 /**
3367 * Exported Entry for EntryIterator
3368 */
3369 static final class MapEntry<K,V> implements Map.Entry<K,V> {
3370 final K key; // non-null
3371 V val; // non-null
3372 final ConcurrentHashMapV8<K,V> map;
3373 MapEntry(K key, V val, ConcurrentHashMapV8<K,V> map) {
3374 this.key = key;
3375 this.val = val;
3376 this.map = map;
3377 }
3378 public K getKey() { return key; }
3379 public V getValue() { return val; }
3380 public int hashCode() { return key.hashCode() ^ val.hashCode(); }
3381 public String toString() { return key + "=" + val; }
3382
3383 public boolean equals(Object o) {
3384 Object k, v; Map.Entry<?,?> e;
3385 return ((o instanceof Map.Entry) &&
3386 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3387 (v = e.getValue()) != null &&
3388 (k == key || k.equals(key)) &&
3389 (v == val || v.equals(val)));
3390 }
3391
3392 /**
3393 * Sets our entry's value and writes through to the map. The
3394 * value to return is somewhat arbitrary here. Since we do not
3395 * necessarily track asynchronous changes, the most recent
3396 * "previous" value could be different from what we return (or
3397 * could even have been removed, in which case the put will
3398 * re-establish). We do not and cannot guarantee more.
3399 */
3400 public V setValue(V value) {
3401 if (value == null) throw new NullPointerException();
3402 V v = val;
3403 val = value;
3404 map.put(key, value);
3405 return v;
3406 }
3407 }
3408
3409 static final class KeySpliterator<K,V> extends Traverser<K,V>
3410 implements ConcurrentHashMapSpliterator<K> {
3411 long est; // size estimate
3412 KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3413 long est) {
3414 super(tab, size, index, limit);
3415 this.est = est;
3416 }
3417
3418 public ConcurrentHashMapSpliterator<K> trySplit() {
3419 int i, f, h;
3420 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3421 new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
3422 f, est >>>= 1);
3423 }
3424
3425 public void forEachRemaining(Action<? super K> action) {
3426 if (action == null) throw new NullPointerException();
3427 for (Node<K,V> p; (p = advance()) != null;)
3428 action.apply(p.key);
3429 }
3430
3431 public boolean tryAdvance(Action<? super K> action) {
3432 if (action == null) throw new NullPointerException();
3433 Node<K,V> p;
3434 if ((p = advance()) == null)
3435 return false;
3436 action.apply(p.key);
3437 return true;
3438 }
3439
3440 public long estimateSize() { return est; }
3441
3442 }
3443
3444 static final class ValueSpliterator<K,V> extends Traverser<K,V>
3445 implements ConcurrentHashMapSpliterator<V> {
3446 long est; // size estimate
3447 ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
3448 long est) {
3449 super(tab, size, index, limit);
3450 this.est = est;
3451 }
3452
3453 public ConcurrentHashMapSpliterator<V> trySplit() {
3454 int i, f, h;
3455 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3456 new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
3457 f, est >>>= 1);
3458 }
3459
3460 public void forEachRemaining(Action<? super V> action) {
3461 if (action == null) throw new NullPointerException();
3462 for (Node<K,V> p; (p = advance()) != null;)
3463 action.apply(p.val);
3464 }
3465
3466 public boolean tryAdvance(Action<? super V> action) {
3467 if (action == null) throw new NullPointerException();
3468 Node<K,V> p;
3469 if ((p = advance()) == null)
3470 return false;
3471 action.apply(p.val);
3472 return true;
3473 }
3474
3475 public long estimateSize() { return est; }
3476
3477 }
3478
3479 static final class EntrySpliterator<K,V> extends Traverser<K,V>
3480 implements ConcurrentHashMapSpliterator<Map.Entry<K,V>> {
3481 final ConcurrentHashMapV8<K,V> map; // To export MapEntry
3482 long est; // size estimate
3483 EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3484 long est, ConcurrentHashMapV8<K,V> map) {
3485 super(tab, size, index, limit);
3486 this.map = map;
3487 this.est = est;
3488 }
3489
3490 public ConcurrentHashMapSpliterator<Map.Entry<K,V>> trySplit() {
3491 int i, f, h;
3492 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3493 new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
3494 f, est >>>= 1, map);
3495 }
3496
3497 public void forEachRemaining(Action<? super Map.Entry<K,V>> action) {
3498 if (action == null) throw new NullPointerException();
3499 for (Node<K,V> p; (p = advance()) != null; )
3500 action.apply(new MapEntry<K,V>(p.key, p.val, map));
3501 }
3502
3503 public boolean tryAdvance(Action<? super Map.Entry<K,V>> action) {
3504 if (action == null) throw new NullPointerException();
3505 Node<K,V> p;
3506 if ((p = advance()) == null)
3507 return false;
3508 action.apply(new MapEntry<K,V>(p.key, p.val, map));
3509 return true;
3510 }
3511
3512 public long estimateSize() { return est; }
3513
3514 }
3515
3516 // Parallel bulk operations
3517
3518 /**
3519 * Computes initial batch value for bulk tasks. The returned value
3520 * is approximately exp2 of the number of times (minus one) to
3521 * split task by two before executing leaf action. This value is
3522 * faster to compute and more convenient to use as a guide to
3523 * splitting than is the depth, since it is used while dividing by
3524 * two anyway.
3525 */
3526 final int batchFor(long b) {
3527 long n;
3528 if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3529 return 0;
3530 int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3531 return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3532 }
3533
3534 /**
3535 * Performs the given action for each (key, value).
3536 *
3537 * @param parallelismThreshold the (estimated) number of elements
3538 * needed for this operation to be executed in parallel
3539 * @param action the action
3540 * @since 1.8
3541 */
3542 public void forEach(long parallelismThreshold,
3543 BiAction<? super K,? super V> action) {
3544 if (action == null) throw new NullPointerException();
3545 new ForEachMappingTask<K,V>
3546 (null, batchFor(parallelismThreshold), 0, 0, table,
3547 action).invoke();
3548 }
3549
3550 /**
3551 * Performs the given action for each non-null transformation
3552 * of each (key, value).
3553 *
3554 * @param parallelismThreshold the (estimated) number of elements
3555 * needed for this operation to be executed in parallel
3556 * @param transformer a function returning the transformation
3557 * for an element, or null if there is no transformation (in
3558 * which case the action is not applied)
3559 * @param action the action
3560 * @since 1.8
3561 */
3562 public <U> void forEach(long parallelismThreshold,
3563 BiFun<? super K, ? super V, ? extends U> transformer,
3564 Action<? super U> action) {
3565 if (transformer == null || action == null)
3566 throw new NullPointerException();
3567 new ForEachTransformedMappingTask<K,V,U>
3568 (null, batchFor(parallelismThreshold), 0, 0, table,
3569 transformer, action).invoke();
3570 }
3571
3572 /**
3573 * Returns a non-null result from applying the given search
3574 * function on each (key, value), or null if none. Upon
3575 * success, further element processing is suppressed and the
3576 * results of any other parallel invocations of the search
3577 * function are ignored.
3578 *
3579 * @param parallelismThreshold the (estimated) number of elements
3580 * needed for this operation to be executed in parallel
3581 * @param searchFunction a function returning a non-null
3582 * result on success, else null
3583 * @return a non-null result from applying the given search
3584 * function on each (key, value), or null if none
3585 * @since 1.8
3586 */
3587 public <U> U search(long parallelismThreshold,
3588 BiFun<? super K, ? super V, ? extends U> searchFunction) {
3589 if (searchFunction == null) throw new NullPointerException();
3590 return new SearchMappingsTask<K,V,U>
3591 (null, batchFor(parallelismThreshold), 0, 0, table,
3592 searchFunction, new AtomicReference<U>()).invoke();
3593 }
3594
3595 /**
3596 * Returns the result of accumulating the given transformation
3597 * of all (key, value) pairs using the given reducer to
3598 * combine values, or null if none.
3599 *
3600 * @param parallelismThreshold the (estimated) number of elements
3601 * needed for this operation to be executed in parallel
3602 * @param transformer a function returning the transformation
3603 * for an element, or null if there is no transformation (in
3604 * which case it is not combined)
3605 * @param reducer a commutative associative combining function
3606 * @return the result of accumulating the given transformation
3607 * of all (key, value) pairs
3608 * @since 1.8
3609 */
3610 public <U> U reduce(long parallelismThreshold,
3611 BiFun<? super K, ? super V, ? extends U> transformer,
3612 BiFun<? super U, ? super U, ? extends U> reducer) {
3613 if (transformer == null || reducer == null)
3614 throw new NullPointerException();
3615 return new MapReduceMappingsTask<K,V,U>
3616 (null, batchFor(parallelismThreshold), 0, 0, table,
3617 null, transformer, reducer).invoke();
3618 }
3619
3620 /**
3621 * Returns the result of accumulating the given transformation
3622 * of all (key, value) pairs using the given reducer to
3623 * combine values, and the given basis as an identity value.
3624 *
3625 * @param parallelismThreshold the (estimated) number of elements
3626 * needed for this operation to be executed in parallel
3627 * @param transformer a function returning the transformation
3628 * for an element
3629 * @param basis the identity (initial default value) for the reduction
3630 * @param reducer a commutative associative combining function
3631 * @return the result of accumulating the given transformation
3632 * of all (key, value) pairs
3633 * @since 1.8
3634 */
3635 public double reduceToDouble(long parallelismThreshold,
3636 ObjectByObjectToDouble<? super K, ? super V> transformer,
3637 double basis,
3638 DoubleByDoubleToDouble reducer) {
3639 if (transformer == null || reducer == null)
3640 throw new NullPointerException();
3641 return new MapReduceMappingsToDoubleTask<K,V>
3642 (null, batchFor(parallelismThreshold), 0, 0, table,
3643 null, transformer, basis, reducer).invoke();
3644 }
3645
3646 /**
3647 * Returns the result of accumulating the given transformation
3648 * of all (key, value) pairs using the given reducer to
3649 * combine values, and the given basis as an identity value.
3650 *
3651 * @param parallelismThreshold the (estimated) number of elements
3652 * needed for this operation to be executed in parallel
3653 * @param transformer a function returning the transformation
3654 * for an element
3655 * @param basis the identity (initial default value) for the reduction
3656 * @param reducer a commutative associative combining function
3657 * @return the result of accumulating the given transformation
3658 * of all (key, value) pairs
3659 * @since 1.8
3660 */
3661 public long reduceToLong(long parallelismThreshold,
3662 ObjectByObjectToLong<? super K, ? super V> transformer,
3663 long basis,
3664 LongByLongToLong reducer) {
3665 if (transformer == null || reducer == null)
3666 throw new NullPointerException();
3667 return new MapReduceMappingsToLongTask<K,V>
3668 (null, batchFor(parallelismThreshold), 0, 0, table,
3669 null, transformer, basis, reducer).invoke();
3670 }
3671
3672 /**
3673 * Returns the result of accumulating the given transformation
3674 * of all (key, value) pairs using the given reducer to
3675 * combine values, and the given basis as an identity value.
3676 *
3677 * @param parallelismThreshold the (estimated) number of elements
3678 * needed for this operation to be executed in parallel
3679 * @param transformer a function returning the transformation
3680 * for an element
3681 * @param basis the identity (initial default value) for the reduction
3682 * @param reducer a commutative associative combining function
3683 * @return the result of accumulating the given transformation
3684 * of all (key, value) pairs
3685 * @since 1.8
3686 */
3687 public int reduceToInt(long parallelismThreshold,
3688 ObjectByObjectToInt<? super K, ? super V> transformer,
3689 int basis,
3690 IntByIntToInt reducer) {
3691 if (transformer == null || reducer == null)
3692 throw new NullPointerException();
3693 return new MapReduceMappingsToIntTask<K,V>
3694 (null, batchFor(parallelismThreshold), 0, 0, table,
3695 null, transformer, basis, reducer).invoke();
3696 }
3697
3698 /**
3699 * Performs the given action for each key.
3700 *
3701 * @param parallelismThreshold the (estimated) number of elements
3702 * needed for this operation to be executed in parallel
3703 * @param action the action
3704 * @since 1.8
3705 */
3706 public void forEachKey(long parallelismThreshold,
3707 Action<? super K> action) {
3708 if (action == null) throw new NullPointerException();
3709 new ForEachKeyTask<K,V>
3710 (null, batchFor(parallelismThreshold), 0, 0, table,
3711 action).invoke();
3712 }
3713
3714 /**
3715 * Performs the given action for each non-null transformation
3716 * of each key.
3717 *
3718 * @param parallelismThreshold the (estimated) number of elements
3719 * needed for this operation to be executed in parallel
3720 * @param transformer a function returning the transformation
3721 * for an element, or null if there is no transformation (in
3722 * which case the action is not applied)
3723 * @param action the action
3724 * @since 1.8
3725 */
3726 public <U> void forEachKey(long parallelismThreshold,
3727 Fun<? super K, ? extends U> transformer,
3728 Action<? super U> action) {
3729 if (transformer == null || action == null)
3730 throw new NullPointerException();
3731 new ForEachTransformedKeyTask<K,V,U>
3732 (null, batchFor(parallelismThreshold), 0, 0, table,
3733 transformer, action).invoke();
3734 }
3735
3736 /**
3737 * Returns a non-null result from applying the given search
3738 * function on each key, or null if none. Upon success,
3739 * further element processing is suppressed and the results of
3740 * any other parallel invocations of the search function are
3741 * ignored.
3742 *
3743 * @param parallelismThreshold the (estimated) number of elements
3744 * needed for this operation to be executed in parallel
3745 * @param searchFunction a function returning a non-null
3746 * result on success, else null
3747 * @return a non-null result from applying the given search
3748 * function on each key, or null if none
3749 * @since 1.8
3750 */
3751 public <U> U searchKeys(long parallelismThreshold,
3752 Fun<? super K, ? extends U> searchFunction) {
3753 if (searchFunction == null) throw new NullPointerException();
3754 return new SearchKeysTask<K,V,U>
3755 (null, batchFor(parallelismThreshold), 0, 0, table,
3756 searchFunction, new AtomicReference<U>()).invoke();
3757 }
3758
3759 /**
3760 * Returns the result of accumulating all keys using the given
3761 * reducer to combine values, or null if none.
3762 *
3763 * @param parallelismThreshold the (estimated) number of elements
3764 * needed for this operation to be executed in parallel
3765 * @param reducer a commutative associative combining function
3766 * @return the result of accumulating all keys using the given
3767 * reducer to combine values, or null if none
3768 * @since 1.8
3769 */
3770 public K reduceKeys(long parallelismThreshold,
3771 BiFun<? super K, ? super K, ? extends K> reducer) {
3772 if (reducer == null) throw new NullPointerException();
3773 return new ReduceKeysTask<K,V>
3774 (null, batchFor(parallelismThreshold), 0, 0, table,
3775 null, reducer).invoke();
3776 }
3777
3778 /**
3779 * Returns the result of accumulating the given transformation
3780 * of all keys using the given reducer to combine values, or
3781 * null if none.
3782 *
3783 * @param parallelismThreshold the (estimated) number of elements
3784 * needed for this operation to be executed in parallel
3785 * @param transformer a function returning the transformation
3786 * for an element, or null if there is no transformation (in
3787 * which case it is not combined)
3788 * @param reducer a commutative associative combining function
3789 * @return the result of accumulating the given transformation
3790 * of all keys
3791 * @since 1.8
3792 */
3793 public <U> U reduceKeys(long parallelismThreshold,
3794 Fun<? super K, ? extends U> transformer,
3795 BiFun<? super U, ? super U, ? extends U> reducer) {
3796 if (transformer == null || reducer == null)
3797 throw new NullPointerException();
3798 return new MapReduceKeysTask<K,V,U>
3799 (null, batchFor(parallelismThreshold), 0, 0, table,
3800 null, transformer, reducer).invoke();
3801 }
3802
3803 /**
3804 * Returns the result of accumulating the given transformation
3805 * of all keys using the given reducer to combine values, and
3806 * the given basis as an identity value.
3807 *
3808 * @param parallelismThreshold the (estimated) number of elements
3809 * needed for this operation to be executed in parallel
3810 * @param transformer a function returning the transformation
3811 * for an element
3812 * @param basis the identity (initial default value) for the reduction
3813 * @param reducer a commutative associative combining function
3814 * @return the result of accumulating the given transformation
3815 * of all keys
3816 * @since 1.8
3817 */
3818 public double reduceKeysToDouble(long parallelismThreshold,
3819 ObjectToDouble<? super K> transformer,
3820 double basis,
3821 DoubleByDoubleToDouble reducer) {
3822 if (transformer == null || reducer == null)
3823 throw new NullPointerException();
3824 return new MapReduceKeysToDoubleTask<K,V>
3825 (null, batchFor(parallelismThreshold), 0, 0, table,
3826 null, transformer, basis, reducer).invoke();
3827 }
3828
3829 /**
3830 * Returns the result of accumulating the given transformation
3831 * of all keys using the given reducer to combine values, and
3832 * the given basis as an identity value.
3833 *
3834 * @param parallelismThreshold the (estimated) number of elements
3835 * needed for this operation to be executed in parallel
3836 * @param transformer a function returning the transformation
3837 * for an element
3838 * @param basis the identity (initial default value) for the reduction
3839 * @param reducer a commutative associative combining function
3840 * @return the result of accumulating the given transformation
3841 * of all keys
3842 * @since 1.8
3843 */
3844 public long reduceKeysToLong(long parallelismThreshold,
3845 ObjectToLong<? super K> transformer,
3846 long basis,
3847 LongByLongToLong reducer) {
3848 if (transformer == null || reducer == null)
3849 throw new NullPointerException();
3850 return new MapReduceKeysToLongTask<K,V>
3851 (null, batchFor(parallelismThreshold), 0, 0, table,
3852 null, transformer, basis, reducer).invoke();
3853 }
3854
3855 /**
3856 * Returns the result of accumulating the given transformation
3857 * of all keys using the given reducer to combine values, and
3858 * the given basis as an identity value.
3859 *
3860 * @param parallelismThreshold the (estimated) number of elements
3861 * needed for this operation to be executed in parallel
3862 * @param transformer a function returning the transformation
3863 * for an element
3864 * @param basis the identity (initial default value) for the reduction
3865 * @param reducer a commutative associative combining function
3866 * @return the result of accumulating the given transformation
3867 * of all keys
3868 * @since 1.8
3869 */
3870 public int reduceKeysToInt(long parallelismThreshold,
3871 ObjectToInt<? super K> transformer,
3872 int basis,
3873 IntByIntToInt reducer) {
3874 if (transformer == null || reducer == null)
3875 throw new NullPointerException();
3876 return new MapReduceKeysToIntTask<K,V>
3877 (null, batchFor(parallelismThreshold), 0, 0, table,
3878 null, transformer, basis, reducer).invoke();
3879 }
3880
3881 /**
3882 * Performs the given action for each value.
3883 *
3884 * @param parallelismThreshold the (estimated) number of elements
3885 * needed for this operation to be executed in parallel
3886 * @param action the action
3887 * @since 1.8
3888 */
3889 public void forEachValue(long parallelismThreshold,
3890 Action<? super V> action) {
3891 if (action == null)
3892 throw new NullPointerException();
3893 new ForEachValueTask<K,V>
3894 (null, batchFor(parallelismThreshold), 0, 0, table,
3895 action).invoke();
3896 }
3897
3898 /**
3899 * Performs the given action for each non-null transformation
3900 * of each value.
3901 *
3902 * @param parallelismThreshold the (estimated) number of elements
3903 * needed for this operation to be executed in parallel
3904 * @param transformer a function returning the transformation
3905 * for an element, or null if there is no transformation (in
3906 * which case the action is not applied)
3907 * @param action the action
3908 * @since 1.8
3909 */
3910 public <U> void forEachValue(long parallelismThreshold,
3911 Fun<? super V, ? extends U> transformer,
3912 Action<? super U> action) {
3913 if (transformer == null || action == null)
3914 throw new NullPointerException();
3915 new ForEachTransformedValueTask<K,V,U>
3916 (null, batchFor(parallelismThreshold), 0, 0, table,
3917 transformer, action).invoke();
3918 }
3919
3920 /**
3921 * Returns a non-null result from applying the given search
3922 * function on each value, or null if none. Upon success,
3923 * further element processing is suppressed and the results of
3924 * any other parallel invocations of the search function are
3925 * ignored.
3926 *
3927 * @param parallelismThreshold the (estimated) number of elements
3928 * needed for this operation to be executed in parallel
3929 * @param searchFunction a function returning a non-null
3930 * result on success, else null
3931 * @return a non-null result from applying the given search
3932 * function on each value, or null if none
3933 * @since 1.8
3934 */
3935 public <U> U searchValues(long parallelismThreshold,
3936 Fun<? super V, ? extends U> searchFunction) {
3937 if (searchFunction == null) throw new NullPointerException();
3938 return new SearchValuesTask<K,V,U>
3939 (null, batchFor(parallelismThreshold), 0, 0, table,
3940 searchFunction, new AtomicReference<U>()).invoke();
3941 }
3942
3943 /**
3944 * Returns the result of accumulating all values using the
3945 * given reducer to combine values, or null if none.
3946 *
3947 * @param parallelismThreshold the (estimated) number of elements
3948 * needed for this operation to be executed in parallel
3949 * @param reducer a commutative associative combining function
3950 * @return the result of accumulating all values
3951 * @since 1.8
3952 */
3953 public V reduceValues(long parallelismThreshold,
3954 BiFun<? super V, ? super V, ? extends V> reducer) {
3955 if (reducer == null) throw new NullPointerException();
3956 return new ReduceValuesTask<K,V>
3957 (null, batchFor(parallelismThreshold), 0, 0, table,
3958 null, reducer).invoke();
3959 }
3960
3961 /**
3962 * Returns the result of accumulating the given transformation
3963 * of all values using the given reducer to combine values, or
3964 * null if none.
3965 *
3966 * @param parallelismThreshold the (estimated) number of elements
3967 * needed for this operation to be executed in parallel
3968 * @param transformer a function returning the transformation
3969 * for an element, or null if there is no transformation (in
3970 * which case it is not combined)
3971 * @param reducer a commutative associative combining function
3972 * @return the result of accumulating the given transformation
3973 * of all values
3974 * @since 1.8
3975 */
3976 public <U> U reduceValues(long parallelismThreshold,
3977 Fun<? super V, ? extends U> transformer,
3978 BiFun<? super U, ? super U, ? extends U> reducer) {
3979 if (transformer == null || reducer == null)
3980 throw new NullPointerException();
3981 return new MapReduceValuesTask<K,V,U>
3982 (null, batchFor(parallelismThreshold), 0, 0, table,
3983 null, transformer, reducer).invoke();
3984 }
3985
3986 /**
3987 * Returns the result of accumulating the given transformation
3988 * of all values using the given reducer to combine values,
3989 * and the given basis as an identity value.
3990 *
3991 * @param parallelismThreshold the (estimated) number of elements
3992 * needed for this operation to be executed in parallel
3993 * @param transformer a function returning the transformation
3994 * for an element
3995 * @param basis the identity (initial default value) for the reduction
3996 * @param reducer a commutative associative combining function
3997 * @return the result of accumulating the given transformation
3998 * of all values
3999 * @since 1.8
4000 */
4001 public double reduceValuesToDouble(long parallelismThreshold,
4002 ObjectToDouble<? super V> transformer,
4003 double basis,
4004 DoubleByDoubleToDouble reducer) {
4005 if (transformer == null || reducer == null)
4006 throw new NullPointerException();
4007 return new MapReduceValuesToDoubleTask<K,V>
4008 (null, batchFor(parallelismThreshold), 0, 0, table,
4009 null, transformer, basis, reducer).invoke();
4010 }
4011
4012 /**
4013 * Returns the result of accumulating the given transformation
4014 * of all values using the given reducer to combine values,
4015 * and the given basis as an identity value.
4016 *
4017 * @param parallelismThreshold the (estimated) number of elements
4018 * needed for this operation to be executed in parallel
4019 * @param transformer a function returning the transformation
4020 * for an element
4021 * @param basis the identity (initial default value) for the reduction
4022 * @param reducer a commutative associative combining function
4023 * @return the result of accumulating the given transformation
4024 * of all values
4025 * @since 1.8
4026 */
4027 public long reduceValuesToLong(long parallelismThreshold,
4028 ObjectToLong<? super V> transformer,
4029 long basis,
4030 LongByLongToLong reducer) {
4031 if (transformer == null || reducer == null)
4032 throw new NullPointerException();
4033 return new MapReduceValuesToLongTask<K,V>
4034 (null, batchFor(parallelismThreshold), 0, 0, table,
4035 null, transformer, basis, reducer).invoke();
4036 }
4037
4038 /**
4039 * Returns the result of accumulating the given transformation
4040 * of all values using the given reducer to combine values,
4041 * and the given basis as an identity value.
4042 *
4043 * @param parallelismThreshold the (estimated) number of elements
4044 * needed for this operation to be executed in parallel
4045 * @param transformer a function returning the transformation
4046 * for an element
4047 * @param basis the identity (initial default value) for the reduction
4048 * @param reducer a commutative associative combining function
4049 * @return the result of accumulating the given transformation
4050 * of all values
4051 * @since 1.8
4052 */
4053 public int reduceValuesToInt(long parallelismThreshold,
4054 ObjectToInt<? super V> transformer,
4055 int basis,
4056 IntByIntToInt reducer) {
4057 if (transformer == null || reducer == null)
4058 throw new NullPointerException();
4059 return new MapReduceValuesToIntTask<K,V>
4060 (null, batchFor(parallelismThreshold), 0, 0, table,
4061 null, transformer, basis, reducer).invoke();
4062 }
4063
4064 /**
4065 * Performs the given action for each entry.
4066 *
4067 * @param parallelismThreshold the (estimated) number of elements
4068 * needed for this operation to be executed in parallel
4069 * @param action the action
4070 * @since 1.8
4071 */
4072 public void forEachEntry(long parallelismThreshold,
4073 Action<? super Map.Entry<K,V>> action) {
4074 if (action == null) throw new NullPointerException();
4075 new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
4076 action).invoke();
4077 }
4078
4079 /**
4080 * Performs the given action for each non-null transformation
4081 * of each entry.
4082 *
4083 * @param parallelismThreshold the (estimated) number of elements
4084 * needed for this operation to be executed in parallel
4085 * @param transformer a function returning the transformation
4086 * for an element, or null if there is no transformation (in
4087 * which case the action is not applied)
4088 * @param action the action
4089 * @since 1.8
4090 */
4091 public <U> void forEachEntry(long parallelismThreshold,
4092 Fun<Map.Entry<K,V>, ? extends U> transformer,
4093 Action<? super U> action) {
4094 if (transformer == null || action == null)
4095 throw new NullPointerException();
4096 new ForEachTransformedEntryTask<K,V,U>
4097 (null, batchFor(parallelismThreshold), 0, 0, table,
4098 transformer, action).invoke();
4099 }
4100
4101 /**
4102 * Returns a non-null result from applying the given search
4103 * function on each entry, or null if none. Upon success,
4104 * further element processing is suppressed and the results of
4105 * any other parallel invocations of the search function are
4106 * ignored.
4107 *
4108 * @param parallelismThreshold the (estimated) number of elements
4109 * needed for this operation to be executed in parallel
4110 * @param searchFunction a function returning a non-null
4111 * result on success, else null
4112 * @return a non-null result from applying the given search
4113 * function on each entry, or null if none
4114 * @since 1.8
4115 */
4116 public <U> U searchEntries(long parallelismThreshold,
4117 Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
4118 if (searchFunction == null) throw new NullPointerException();
4119 return new SearchEntriesTask<K,V,U>
4120 (null, batchFor(parallelismThreshold), 0, 0, table,
4121 searchFunction, new AtomicReference<U>()).invoke();
4122 }
4123
4124 /**
4125 * Returns the result of accumulating all entries using the
4126 * given reducer to combine values, or null if none.
4127 *
4128 * @param parallelismThreshold the (estimated) number of elements
4129 * needed for this operation to be executed in parallel
4130 * @param reducer a commutative associative combining function
4131 * @return the result of accumulating all entries
4132 * @since 1.8
4133 */
4134 public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4135 BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4136 if (reducer == null) throw new NullPointerException();
4137 return new ReduceEntriesTask<K,V>
4138 (null, batchFor(parallelismThreshold), 0, 0, table,
4139 null, reducer).invoke();
4140 }
4141
4142 /**
4143 * Returns the result of accumulating the given transformation
4144 * of all entries using the given reducer to combine values,
4145 * or null if none.
4146 *
4147 * @param parallelismThreshold the (estimated) number of elements
4148 * needed for this operation to be executed in parallel
4149 * @param transformer a function returning the transformation
4150 * for an element, or null if there is no transformation (in
4151 * which case it is not combined)
4152 * @param reducer a commutative associative combining function
4153 * @return the result of accumulating the given transformation
4154 * of all entries
4155 * @since 1.8
4156 */
4157 public <U> U reduceEntries(long parallelismThreshold,
4158 Fun<Map.Entry<K,V>, ? extends U> transformer,
4159 BiFun<? super U, ? super U, ? extends U> reducer) {
4160 if (transformer == null || reducer == null)
4161 throw new NullPointerException();
4162 return new MapReduceEntriesTask<K,V,U>
4163 (null, batchFor(parallelismThreshold), 0, 0, table,
4164 null, transformer, reducer).invoke();
4165 }
4166
4167 /**
4168 * Returns the result of accumulating the given transformation
4169 * of all entries using the given reducer to combine values,
4170 * and the given basis as an identity value.
4171 *
4172 * @param parallelismThreshold the (estimated) number of elements
4173 * needed for this operation to be executed in parallel
4174 * @param transformer a function returning the transformation
4175 * for an element
4176 * @param basis the identity (initial default value) for the reduction
4177 * @param reducer a commutative associative combining function
4178 * @return the result of accumulating the given transformation
4179 * of all entries
4180 * @since 1.8
4181 */
4182 public double reduceEntriesToDouble(long parallelismThreshold,
4183 ObjectToDouble<Map.Entry<K,V>> transformer,
4184 double basis,
4185 DoubleByDoubleToDouble reducer) {
4186 if (transformer == null || reducer == null)
4187 throw new NullPointerException();
4188 return new MapReduceEntriesToDoubleTask<K,V>
4189 (null, batchFor(parallelismThreshold), 0, 0, table,
4190 null, transformer, basis, reducer).invoke();
4191 }
4192
4193 /**
4194 * Returns the result of accumulating the given transformation
4195 * of all entries using the given reducer to combine values,
4196 * and the given basis as an identity value.
4197 *
4198 * @param parallelismThreshold the (estimated) number of elements
4199 * needed for this operation to be executed in parallel
4200 * @param transformer a function returning the transformation
4201 * for an element
4202 * @param basis the identity (initial default value) for the reduction
4203 * @param reducer a commutative associative combining function
4204 * @return the result of accumulating the given transformation
4205 * of all entries
4206 * @since 1.8
4207 */
4208 public long reduceEntriesToLong(long parallelismThreshold,
4209 ObjectToLong<Map.Entry<K,V>> transformer,
4210 long basis,
4211 LongByLongToLong reducer) {
4212 if (transformer == null || reducer == null)
4213 throw new NullPointerException();
4214 return new MapReduceEntriesToLongTask<K,V>
4215 (null, batchFor(parallelismThreshold), 0, 0, table,
4216 null, transformer, basis, reducer).invoke();
4217 }
4218
4219 /**
4220 * Returns the result of accumulating the given transformation
4221 * of all entries using the given reducer to combine values,
4222 * and the given basis as an identity value.
4223 *
4224 * @param parallelismThreshold the (estimated) number of elements
4225 * needed for this operation to be executed in parallel
4226 * @param transformer a function returning the transformation
4227 * for an element
4228 * @param basis the identity (initial default value) for the reduction
4229 * @param reducer a commutative associative combining function
4230 * @return the result of accumulating the given transformation
4231 * of all entries
4232 * @since 1.8
4233 */
4234 public int reduceEntriesToInt(long parallelismThreshold,
4235 ObjectToInt<Map.Entry<K,V>> transformer,
4236 int basis,
4237 IntByIntToInt reducer) {
4238 if (transformer == null || reducer == null)
4239 throw new NullPointerException();
4240 return new MapReduceEntriesToIntTask<K,V>
4241 (null, batchFor(parallelismThreshold), 0, 0, table,
4242 null, transformer, basis, reducer).invoke();
4243 }
4244
4245
4246 /* ----------------Views -------------- */
4247
    /**
     * Base class for views.
     *
     * <p>Each view is a live {@link Collection} facade over a backing
     * ConcurrentHashMapV8: size, emptiness, and clearing all delegate
     * directly to the map, and iteration is weakly consistent (see
     * {@link #iterator()}). Concrete subclasses supply the element
     * type ({@code E}) and the {@code iterator}/{@code contains}/
     * {@code remove} primitives the bulk operations below build on.
     */
    abstract static class CollectionView<K,V,E>
        implements Collection<E>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        // The backing map; all view operations delegate to it.
        final ConcurrentHashMapV8<K,V> map;
        CollectionView(ConcurrentHashMapV8<K,V> map) { this.map = map; }

        /**
         * Returns the map backing this view.
         *
         * @return the map backing this view
         */
        public ConcurrentHashMapV8<K,V> getMap() { return map; }

        /**
         * Removes all of the elements from this view, by removing all
         * the mappings from the map backing this view.
         */
        public final void clear() { map.clear(); }
        public final int size() { return map.size(); }
        public final boolean isEmpty() { return map.isEmpty(); }

        // implementations below rely on concrete classes supplying these
        // abstract methods
        /**
         * Returns a "weakly consistent" iterator that will never
         * throw {@link ConcurrentModificationException}, and
         * guarantees to traverse elements as they existed upon
         * construction of the iterator, and may (but is not
         * guaranteed to) reflect any modifications subsequent to
         * construction.
         */
        public abstract Iterator<E> iterator();
        public abstract boolean contains(Object o);
        public abstract boolean remove(Object o);

        private static final String oomeMsg = "Required array size too large";

        public final Object[] toArray() {
            // Size from mappingCount (a long) so a huge map fails fast
            // rather than overflowing the array length.
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(oomeMsg);
            int n = (int)sz;
            Object[] r = new Object[n];
            int i = 0;
            // The map may grow concurrently while we copy, so the array
            // is expanded (by ~1.5x, capped at MAX_ARRAY_SIZE) on demand.
            for (E e : this) {
                if (i == n) {
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = e;
            }
            // Trim if the map shrank (or we over-grew) during traversal.
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        @SuppressWarnings("unchecked")
        public final <T> T[] toArray(T[] a) {
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(oomeMsg);
            int m = (int)sz;
            // Reuse the caller's array when it is big enough, per the
            // Collection.toArray(T[]) contract.
            T[] r = (a.length >= m) ? a :
                (T[])java.lang.reflect.Array
                .newInstance(a.getClass().getComponentType(), m);
            int n = r.length;
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = (T)e;
            }
            if (a == r && i < n) {
                r[i] = null; // null-terminate
                return r;
            }
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        /**
         * Returns a string representation of this collection.
         * The string representation consists of the string representations
         * of the collection's elements in the order they are returned by
         * its iterator, enclosed in square brackets ({@code "[]"}).
         * Adjacent elements are separated by the characters {@code ", "}
         * (comma and space). Elements are converted to strings as by
         * {@link String#valueOf(Object)}.
         *
         * @return a string representation of this collection
         */
        public final String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append('[');
            Iterator<E> it = iterator();
            if (it.hasNext()) {
                for (;;) {
                    Object e = it.next();
                    // Guard against printing a self-referential view.
                    sb.append(e == this ? "(this Collection)" : e);
                    if (!it.hasNext())
                        break;
                    sb.append(',').append(' ');
                }
            }
            return sb.append(']').toString();
        }

        public final boolean containsAll(Collection<?> c) {
            if (c != this) {
                // null elements can never be present in these views.
                for (Object e : c) {
                    if (e == null || !contains(e))
                        return false;
                }
            }
            return true;
        }

        public final boolean removeAll(Collection<?> c) {
            boolean modified = false;
            // Iterator.remove keeps this safe under concurrent mutation.
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

        public final boolean retainAll(Collection<?> c) {
            boolean modified = false;
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (!c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

    }
4400
    /**
     * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in
     * which additions may optionally be enabled by mapping to a
     * common value. This class cannot be directly instantiated.
     * See {@link #keySet() keySet()},
     * {@link #keySet(Object) keySet(V)},
     * {@link #newKeySet() newKeySet()},
     * {@link #newKeySet(int) newKeySet(int)}.
     *
     * @since 1.8
     */
    public static class KeySetView<K,V> extends CollectionView<K,V,K>
        implements Set<K>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        // Value mapped by add()/addAll(); null means additions disabled.
        private final V value;
        KeySetView(ConcurrentHashMapV8<K,V> map, V value) { // non-public
            super(map);
            this.value = value;
        }

        /**
         * Returns the default mapped value for additions,
         * or {@code null} if additions are not supported.
         *
         * @return the default mapped value for additions, or {@code null}
         * if not supported
         */
        public V getMappedValue() { return value; }

        /**
         * {@inheritDoc}
         * @throws NullPointerException if the specified key is null
         */
        public boolean contains(Object o) { return map.containsKey(o); }

        /**
         * Removes the key from this map view, by removing the key (and its
         * corresponding value) from the backing map. This method does
         * nothing if the key is not in the map.
         *
         * @param o the key to be removed from the backing map
         * @return {@code true} if the backing map contained the specified key
         * @throws NullPointerException if the specified key is null
         */
        public boolean remove(Object o) { return map.remove(o) != null; }

        /**
         * @return an iterator over the keys of the backing map
         */
        public Iterator<K> iterator() {
            Node<K,V>[] t;
            ConcurrentHashMapV8<K,V> m = map;
            // Snapshot the (volatile) table once; f bounds the traversal.
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeyIterator<K,V>(t, f, 0, f, m);
        }

        /**
         * Adds the specified key to this set view by mapping the key to
         * the default mapped value in the backing map, if defined.
         *
         * @param e key to be added
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the specified key is null
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean add(K e) {
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            // putVal(..., true) only inserts if absent: returns null
            // (and hence true here) exactly when the key was new.
            return map.putVal(e, v, true) == null;
        }

        /**
         * Adds all of the elements in the specified collection to this set,
         * as if by calling {@link #add} on each one.
         *
         * @param c the elements to be inserted into this set
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the collection or any of its
         * elements are {@code null}
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean addAll(Collection<? extends K> c) {
            boolean added = false;
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            for (K e : c) {
                if (map.putVal(e, v, true) == null)
                    added = true;
            }
            return added;
        }

        // Sum of element hash codes, per the Set.hashCode() contract.
        public int hashCode() {
            int h = 0;
            for (K e : this)
                h += e.hashCode();
            return h;
        }

        // Set equality: same elements, in any order.
        public boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public ConcurrentHashMapSpliterator<K> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMapV8<K,V> m = map;
            // sumCount() can transiently be negative; clamp to zero.
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Action<? super K> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.apply(p.key);
            }
        }
    }
4529
    /**
     * A view of a ConcurrentHashMapV8 as a {@link Collection} of
     * values, in which additions are disabled. This class cannot be
     * directly instantiated. See {@link #values()}.
     */
    static final class ValuesView<K,V> extends CollectionView<K,V,V>
        implements Collection<V>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        ValuesView(ConcurrentHashMapV8<K,V> map) { super(map); }
        public final boolean contains(Object o) {
            return map.containsValue(o);
        }

        // Removes one occurrence of the given value via linear scan;
        // null is never a value in this map, so it is rejected cheaply.
        public final boolean remove(Object o) {
            if (o != null) {
                for (Iterator<V> it = iterator(); it.hasNext();) {
                    if (o.equals(it.next())) {
                        it.remove();
                        return true;
                    }
                }
            }
            return false;
        }

        public final Iterator<V> iterator() {
            ConcurrentHashMapV8<K,V> m = map;
            Node<K,V>[] t;
            // Snapshot the (volatile) table once; f bounds the traversal.
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueIterator<K,V>(t, f, 0, f, m);
        }

        // Additions are unsupported: a value cannot be inserted
        // without a corresponding key.
        public final boolean add(V e) {
            throw new UnsupportedOperationException();
        }
        public final boolean addAll(Collection<? extends V> c) {
            throw new UnsupportedOperationException();
        }

        public ConcurrentHashMapSpliterator<V> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMapV8<K,V> m = map;
            // sumCount() can transiently be negative; clamp to zero.
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Action<? super V> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.apply(p.val);
            }
        }
    }
4587
4588 /**
4589 * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value)
4590 * entries. This class cannot be directly instantiated. See
4591 * {@link #entrySet()}.
4592 */
4593 static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
4594 implements Set<Map.Entry<K,V>>, java.io.Serializable {
4595 private static final long serialVersionUID = 2249069246763182397L;
4596 EntrySetView(ConcurrentHashMapV8<K,V> map) { super(map); }
4597
4598 public boolean contains(Object o) {
4599 Object k, v, r; Map.Entry<?,?> e;
4600 return ((o instanceof Map.Entry) &&
4601 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4602 (r = map.get(k)) != null &&
4603 (v = e.getValue()) != null &&
4604 (v == r || v.equals(r)));
4605 }
4606
4607 public boolean remove(Object o) {
4608 Object k, v; Map.Entry<?,?> e;
4609 return ((o instanceof Map.Entry) &&
4610 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4611 (v = e.getValue()) != null &&
4612 map.remove(k, v));
4613 }
4614
4615 /**
4616 * @return an iterator over the entries of the backing map
4617 */
4618 public Iterator<Map.Entry<K,V>> iterator() {
4619 ConcurrentHashMapV8<K,V> m = map;
4620 Node<K,V>[] t;
4621 int f = (t = m.table) == null ? 0 : t.length;
4622 return new EntryIterator<K,V>(t, f, 0, f, m);
4623 }
4624
4625 public boolean add(Entry<K,V> e) {
4626 return map.putVal(e.getKey(), e.getValue(), false) == null;
4627 }
4628
4629 public boolean addAll(Collection<? extends Entry<K,V>> c) {
4630 boolean added = false;
4631 for (Entry<K,V> e : c) {
4632 if (add(e))
4633 added = true;
4634 }
4635 return added;
4636 }
4637
4638 public final int hashCode() {
4639 int h = 0;
4640 Node<K,V>[] t;
4641 if ((t = map.table) != null) {
4642 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4643 for (Node<K,V> p; (p = it.advance()) != null; ) {
4644 h += p.hashCode();
4645 }
4646 }
4647 return h;
4648 }
4649
4650 public final boolean equals(Object o) {
4651 Set<?> c;
4652 return ((o instanceof Set) &&
4653 ((c = (Set<?>)o) == this ||
4654 (containsAll(c) && c.containsAll(this))));
4655 }
4656
4657 public ConcurrentHashMapSpliterator<Map.Entry<K,V>> spliterator() {
4658 Node<K,V>[] t;
4659 ConcurrentHashMapV8<K,V> m = map;
4660 long n = m.sumCount();
4661 int f = (t = m.table) == null ? 0 : t.length;
4662 return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
4663 }
4664
4665 public void forEach(Action<? super Map.Entry<K,V>> action) {
4666 if (action == null) throw new NullPointerException();
4667 Node<K,V>[] t;
4668 if ((t = map.table) != null) {
4669 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4670 for (Node<K,V> p; (p = it.advance()) != null; )
4671 action.apply(new MapEntry<K,V>(p.key, p.val, map));
4672 }
4673 }
4674
4675 }
4676
4677 // -------------------------------------------------------
4678
    /**
     * Base class for bulk tasks. Repeats some fields and code from
     * class Traverser, because we need to subclass CountedCompleter.
     * Subclasses repeatedly halve their range (see their compute
     * methods) and use {@link #advance} to walk the nodes in the
     * portion they retain.
     */
    abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
        Node<K,V>[] tab;        // same as Traverser
        Node<K,V> next;         // next node to visit, or null
        int index;              // current table slot
        int baseIndex;          // current index of initial table
        int baseLimit;          // index bound for initial table
        final int baseSize;     // initial table size
        int batch;              // split control

        /**
         * Creates a task over table slots {@code [i, f)} of {@code t}.
         *
         * @param par parent task, or null for the root task (the root
         *            covers the whole table regardless of {@code f})
         * @param b   split control (batch) value
         * @param i   starting slot index
         * @param f   slot limit, exclusive (ignored when par == null)
         * @param t   table to traverse; may be null for an empty map
         */
        BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
            super(par);
            this.batch = b;
            this.index = this.baseIndex = i;
            if ((this.tab = t) == null)
                this.baseSize = this.baseLimit = 0;
            else if (par == null)
                this.baseSize = this.baseLimit = t.length;
            else {
                this.baseLimit = f;
                this.baseSize = par.baseSize;
            }
        }

        /**
         * Same as Traverser version: returns the next node in this
         * task's range, or null when the range is exhausted. Follows
         * ForwardingNodes into the new table during a concurrent
         * resize, and descends into TreeBins via their node list.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;
            for (;;) {
                Node<K,V>[] t; int i, n; K ek; // must use locals in checks
                if (e != null)
                    return next = e;
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                if ((e = tabAt(t, index)) != null && e.hash < 0) {
                    // negative hash marks a special node: resize
                    // forwarder, tree bin, or reservation
                    if (e instanceof ForwardingNode) {
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first;
                    else
                        e = null;
                }
                if ((index += baseSize) >= n)
                    index = ++baseIndex;    // visit upper slots if present
            }
        }
    }
4736
4737 /*
4738 * Task classes. Coded in a regular but ugly format/style to
4739 * simplify checks that each variant differs in the right way from
4740 * others. The null screenings exist because compilers cannot tell
4741 * that we've already null-checked task arguments, so we force
4742 * simplest hoisted bypass to help avoid convoluted traps.
4743 */
    /**
     * Task applying a given action to every key. Forks the upper half
     * of its remaining range (halving {@code batch} each time) until
     * the split budget is exhausted, then applies the action over the
     * range it keeps.
     */
    @SuppressWarnings("serial")
    static final class ForEachKeyTask<K,V>
        extends BulkTask<K,V,Void> {
        final Action<? super K> action;
        ForEachKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Action<? super K> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Action<? super K> action;
            if ((action = this.action) != null) {
                // Split off upper half [h, f) as a forked subtask;
                // this task keeps the lower half by shrinking baseLimit.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachKeyTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process this task's remaining range.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.apply(p.key);
                propagateCompletion();
            }
        }
    }
4770
    /**
     * Task applying a given action to every value. Forks the upper
     * half of its remaining range (halving {@code batch} each time)
     * until the split budget is exhausted, then applies the action
     * over the range it keeps.
     */
    @SuppressWarnings("serial")
    static final class ForEachValueTask<K,V>
        extends BulkTask<K,V,Void> {
        final Action<? super V> action;
        ForEachValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Action<? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Action<? super V> action;
            if ((action = this.action) != null) {
                // Split off upper half [h, f) as a forked subtask;
                // this task keeps the lower half by shrinking baseLimit.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachValueTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process this task's remaining range.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.apply(p.val);
                propagateCompletion();
            }
        }
    }
4797
    /**
     * Task applying a given action to every entry (the Node itself is
     * passed as the Entry). Forks the upper half of its remaining
     * range until the split budget is exhausted, then applies the
     * action over the range it keeps.
     */
    @SuppressWarnings("serial")
    static final class ForEachEntryTask<K,V>
        extends BulkTask<K,V,Void> {
        final Action<? super Entry<K,V>> action;
        ForEachEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Action<? super Entry<K,V>> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Action<? super Entry<K,V>> action;
            if ((action = this.action) != null) {
                // Split off upper half [h, f) as a forked subtask;
                // this task keeps the lower half by shrinking baseLimit.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachEntryTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process this task's remaining range.
                for (Node<K,V> p; (p = advance()) != null; )
                    action.apply(p);
                propagateCompletion();
            }
        }
    }
4824
    /**
     * Task applying a given (key, value) action to every mapping.
     * Forks the upper half of its remaining range until the split
     * budget is exhausted, then applies the action over the range it
     * keeps.
     */
    @SuppressWarnings("serial")
    static final class ForEachMappingTask<K,V>
        extends BulkTask<K,V,Void> {
        final BiAction<? super K, ? super V> action;
        ForEachMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiAction<? super K,? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final BiAction<? super K, ? super V> action;
            if ((action = this.action) != null) {
                // Split off upper half [h, f) as a forked subtask;
                // this task keeps the lower half by shrinking baseLimit.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachMappingTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process this task's remaining range.
                for (Node<K,V> p; (p = advance()) != null; )
                    action.apply(p.key, p.val);
                propagateCompletion();
            }
        }
    }
4851
    /**
     * Task applying an action to each non-null transformation of a
     * key. Keys whose transformation is null are skipped. Splits as
     * in the plain ForEach tasks.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedKeyTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Fun<? super K, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super K, ? extends U> transformer, Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Fun<? super K, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Split off upper half [h, f) as a forked subtask;
                // this task keeps the lower half by shrinking baseLimit.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedKeyTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // null transformation means "no result": skip
                    if ((u = transformer.apply(p.key)) != null)
                        action.apply(u);
                }
                propagateCompletion();
            }
        }
    }
4884
    /**
     * Task applying an action to each non-null transformation of a
     * value. Values whose transformation is null are skipped. Splits
     * as in the plain ForEach tasks.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedValueTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Fun<? super V, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super V, ? extends U> transformer, Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Fun<? super V, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Split off upper half [h, f) as a forked subtask;
                // this task keeps the lower half by shrinking baseLimit.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedValueTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // null transformation means "no result": skip
                    if ((u = transformer.apply(p.val)) != null)
                        action.apply(u);
                }
                propagateCompletion();
            }
        }
    }
4917
    /**
     * Task applying an action to each non-null transformation of an
     * entry. Entries whose transformation is null are skipped.
     * Splits as in the plain ForEach tasks.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedEntryTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Fun<Map.Entry<K,V>, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<Map.Entry<K,V>, ? extends U> transformer, Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Fun<Map.Entry<K,V>, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Split off upper half [h, f) as a forked subtask;
                // this task keeps the lower half by shrinking baseLimit.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedEntryTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // null transformation means "no result": skip
                    if ((u = transformer.apply(p)) != null)
                        action.apply(u);
                }
                propagateCompletion();
            }
        }
    }
4950
    /**
     * Task applying an action to each non-null transformation of a
     * (key, value) pair. Pairs whose transformation is null are
     * skipped. Splits as in the plain ForEach tasks.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedMappingTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final BiFun<? super K, ? super V, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFun<? super K, ? super V, ? extends U> transformer,
             Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final BiFun<? super K, ? super V, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Split off upper half [h, f) as a forked subtask;
                // this task keeps the lower half by shrinking baseLimit.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedMappingTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // null transformation means "no result": skip
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        action.apply(u);
                }
                propagateCompletion();
            }
        }
    }
4984
    /**
     * Task returning the first non-null result of applying the search
     * function to each key. All subtasks share an AtomicReference;
     * the first one to find a match publishes it and completes the
     * root, and the others quit as soon as they observe it.
     */
    @SuppressWarnings("serial")
    static final class SearchKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super K, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super K, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Fun<? super K, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)   // match found elsewhere
                        return;
                    addToPendingCount(1);
                    new SearchKeysTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan until this range is exhausted or any task finds
                // a match.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key)) != null) {
                        // first match wins; completing the root ends
                        // the whole computation
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5028
    /**
     * Task returning the first non-null result of applying the search
     * function to each value. All subtasks share an AtomicReference;
     * the first one to find a match publishes it and completes the
     * root, and the others quit as soon as they observe it.
     */
    @SuppressWarnings("serial")
    static final class SearchValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Fun<? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)   // match found elsewhere
                        return;
                    addToPendingCount(1);
                    new SearchValuesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan until this range is exhausted or any task finds
                // a match.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.val)) != null) {
                        // first match wins; completing the root ends
                        // the whole computation
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5072
    /**
     * Task returning the first non-null result of applying the search
     * function to each entry. All subtasks share an AtomicReference;
     * the first one to find a match publishes it and completes the
     * root, and the others quit as soon as they observe it.
     */
    @SuppressWarnings("serial")
    static final class SearchEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<Entry<K,V>, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Fun<Entry<K,V>, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)   // match found elsewhere
                        return;
                    addToPendingCount(1);
                    new SearchEntriesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan until this range is exhausted or any task finds
                // a match.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        // "return" here (siblings use "break") is
                        // equivalent: nothing follows the loop
                        return;
                    }
                }
            }
        }
    }
5116
    /**
     * Task returning the first non-null result of applying the search
     * function to each (key, value) pair. All subtasks share an
     * AtomicReference; the first one to find a match publishes it and
     * completes the root, and the others quit as soon as they observe
     * it.
     */
    @SuppressWarnings("serial")
    static final class SearchMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFun<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFun<? super K, ? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final BiFun<? super K, ? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)   // match found elsewhere
                        return;
                    addToPendingCount(1);
                    new SearchMappingsTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan until this range is exhausted or any task finds
                // a match.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key, p.val)) != null) {
                        // first match wins; completing the root ends
                        // the whole computation
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5160
    /**
     * Task reducing all keys with the given reducer, with null as the
     * identity (a null result means the map was empty in this range).
     * Forked right-hand subtasks are chained through {@code rights}/
     * {@code nextRight} and their results folded in at completion
     * points.
     */
    @SuppressWarnings("serial")
    static final class ReduceKeysTask<K,V>
        extends BulkTask<K,V,K> {
        final BiFun<? super K, ? super K, ? extends K> reducer;
        K result;
        ReduceKeysTask<K,V> rights, nextRight;
        ReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceKeysTask<K,V> nextRight,
             BiFun<? super K, ? super K, ? extends K> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final K getRawResult() { return result; }
        public final void compute() {
            final BiFun<? super K, ? super K, ? extends K> reducer;
            if ((reducer = this.reducer) != null) {
                // Split off upper half [h, f) as a forked subtask,
                // linked onto the rights chain for the merge phase.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceKeysTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce this task's own range.
                K r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    K u = p.key;
                    r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
                }
                result = r;
                // At each completion point, fold in the results of
                // this task's completed right-hand subtasks.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") ReduceKeysTask<K,V>
                        t = (ReduceKeysTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        K tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5207
    /**
     * Task reducing all values with the given reducer, with null as
     * the identity (a null result means the map was empty in this
     * range). Forked right-hand subtasks are chained through
     * {@code rights}/{@code nextRight} and their results folded in at
     * completion points.
     */
    @SuppressWarnings("serial")
    static final class ReduceValuesTask<K,V>
        extends BulkTask<K,V,V> {
        final BiFun<? super V, ? super V, ? extends V> reducer;
        V result;
        ReduceValuesTask<K,V> rights, nextRight;
        ReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceValuesTask<K,V> nextRight,
             BiFun<? super V, ? super V, ? extends V> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final V getRawResult() { return result; }
        public final void compute() {
            final BiFun<? super V, ? super V, ? extends V> reducer;
            if ((reducer = this.reducer) != null) {
                // Split off upper half [h, f) as a forked subtask,
                // linked onto the rights chain for the merge phase.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceValuesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce this task's own range.
                V r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    V v = p.val;
                    r = (r == null) ? v : reducer.apply(r, v);
                }
                result = r;
                // At each completion point, fold in the results of
                // this task's completed right-hand subtasks.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") ReduceValuesTask<K,V>
                        t = (ReduceValuesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        V tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5254
    /**
     * Task reducing all entries with the given reducer, with null as
     * the identity (a null result means the map was empty in this
     * range). Forked right-hand subtasks are chained through
     * {@code rights}/{@code nextRight} and their results folded in at
     * completion points.
     */
    @SuppressWarnings("serial")
    static final class ReduceEntriesTask<K,V>
        extends BulkTask<K,V,Map.Entry<K,V>> {
        final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
        Map.Entry<K,V> result;
        ReduceEntriesTask<K,V> rights, nextRight;
        ReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceEntriesTask<K,V> nextRight,
             BiFun<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final Map.Entry<K,V> getRawResult() { return result; }
        public final void compute() {
            final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
            if ((reducer = this.reducer) != null) {
                // Split off upper half [h, f) as a forked subtask,
                // linked onto the rights chain for the merge phase.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceEntriesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce this task's own range (nodes act as entries).
                Map.Entry<K,V> r = null;
                for (Node<K,V> p; (p = advance()) != null; )
                    r = (r == null) ? p : reducer.apply(r, p);
                result = r;
                // At each completion point, fold in the results of
                // this task's completed right-hand subtasks.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") ReduceEntriesTask<K,V>
                        t = (ReduceEntriesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        Map.Entry<K,V> tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5299
    /**
     * Task mapping each key through a transformer and reducing the
     * non-null transformed results, with null as the identity. Forked
     * right-hand subtasks are chained through {@code rights}/
     * {@code nextRight} and folded in at completion points.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super K, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceKeysTask<K,V,U> rights, nextRight;
        MapReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysTask<K,V,U> nextRight,
             Fun<? super K, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Fun<? super K, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Split off upper half [h, f) as a forked subtask,
                // linked onto the rights chain for the merge phase.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Map-reduce this task's own range; null transformed
                // values are skipped.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // At each completion point, fold in the results of
                // this task's completed right-hand subtasks.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysTask<K,V,U>
                        t = (MapReduceKeysTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5352
    /**
     * Task mapping each value through a transformer and reducing the
     * non-null transformed results, with null as the identity. Forked
     * right-hand subtasks are chained through {@code rights}/
     * {@code nextRight} and folded in at completion points.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super V, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceValuesTask<K,V,U> rights, nextRight;
        MapReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesTask<K,V,U> nextRight,
             Fun<? super V, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Fun<? super V, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Split off upper half [h, f) as a forked subtask,
                // linked onto the rights chain for the merge phase.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Map-reduce this task's own range; null transformed
                // values are skipped.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // At each completion point, fold in the results of
                // this task's completed right-hand subtasks.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesTask<K,V,U>
                        t = (MapReduceValuesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5405
    /**
     * Task mapping each entry through a transformer and reducing the
     * non-null transformed results, with null as the identity. Forked
     * right-hand subtasks are chained through {@code rights}/
     * {@code nextRight} and folded in at completion points.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<Map.Entry<K,V>, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceEntriesTask<K,V,U> rights, nextRight;
        MapReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesTask<K,V,U> nextRight,
             Fun<Map.Entry<K,V>, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Fun<Map.Entry<K,V>, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Split off upper half [h, f) as a forked subtask,
                // linked onto the rights chain for the merge phase.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Map-reduce this task's own range; null transformed
                // values are skipped.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // At each completion point, fold in the results of
                // this task's completed right-hand subtasks.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesTask<K,V,U>
                        t = (MapReduceEntriesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5458
    /**
     * Task mapping each (key, value) pair through a transformer and
     * reducing the non-null transformed results, with null as the
     * identity. Forked right-hand subtasks are chained through
     * {@code rights}/{@code nextRight} and folded in at completion
     * points.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFun<? super K, ? super V, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceMappingsTask<K,V,U> rights, nextRight;
        MapReduceMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsTask<K,V,U> nextRight,
             BiFun<? super K, ? super V, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final BiFun<? super K, ? super V, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Split off upper half [h, f) as a forked subtask,
                // linked onto the rights chain for the merge phase.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Map-reduce this task's own range; null transformed
                // values are skipped.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // At each completion point, fold in the results of
                // this task's completed right-hand subtasks.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsTask<K,V,U>
                        t = (MapReduceMappingsTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5511
    /**
     * Task mapping each key to a double and reducing with the given
     * reducer, seeded with {@code basis} as the initial accumulator
     * value for every (sub)task.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<? super K> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;     // identity/initial accumulator value
        double result;
        MapReduceKeysToDoubleTask<K,V> rights, nextRight;
        MapReduceKeysToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToDoubleTask<K,V> nextRight,
             ObjectToDouble<? super K> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectToDouble<? super K> transformer;
            final DoubleByDoubleToDouble reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;  // accumulator starts at basis
                // Split off upper half [h, f) as a forked subtask,
                // linked onto the rights chain for the merge phase.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key));
                result = r;
                // At each completion point, fold in the results of
                // this task's completed right-hand subtasks.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysToDoubleTask<K,V>
                        t = (MapReduceKeysToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5560
    /**
     * Task mapping each value to a double and reducing with the given
     * reducer, seeded with {@code basis} as the initial accumulator
     * value for every (sub)task.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<? super V> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;     // identity/initial accumulator value
        double result;
        MapReduceValuesToDoubleTask<K,V> rights, nextRight;
        MapReduceValuesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToDoubleTask<K,V> nextRight,
             ObjectToDouble<? super V> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectToDouble<? super V> transformer;
            final DoubleByDoubleToDouble reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;  // accumulator starts at basis
                // Split off upper half [h, f) as a forked subtask,
                // linked onto the rights chain for the merge phase.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.val));
                result = r;
                // At each completion point, fold in the results of
                // this task's completed right-hand subtasks.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesToDoubleTask<K,V>
                        t = (MapReduceValuesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5609
// CountedCompleter implementing reduceEntriesToDouble: applies
// `transformer` to every entry (Node implements Map.Entry) and folds
// the results with `reducer`, starting from `basis`.
5610 @SuppressWarnings("serial")
5611 static final class MapReduceEntriesToDoubleTask<K,V>
5612 extends BulkTask<K,V,Double> {
5613 final ObjectToDouble<Map.Entry<K,V>> transformer;
5614 final DoubleByDoubleToDouble reducer;
5615 final double basis;
5616 double result;
5617 MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
5618 MapReduceEntriesToDoubleTask
5619 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5620 MapReduceEntriesToDoubleTask<K,V> nextRight,
5621 ObjectToDouble<Map.Entry<K,V>> transformer,
5622 double basis,
5623 DoubleByDoubleToDouble reducer) {
5624 super(p, b, i, f, t); this.nextRight = nextRight;
5625 this.transformer = transformer;
5626 this.basis = basis; this.reducer = reducer;
5627 }
5628 public final Double getRawResult() { return result; }
5629 public final void compute() {
5630 final ObjectToDouble<Map.Entry<K,V>> transformer;
5631 final DoubleByDoubleToDouble reducer;
5632 if ((transformer = this.transformer) != null &&
5633 (reducer = this.reducer) != null) {
5634 double r = this.basis;
// Fork right-half subtasks while the batch budget remains.
5635 for (int i = baseIndex, f, h; batch > 0 &&
5636 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5637 addToPendingCount(1);
5638 (rights = new MapReduceEntriesToDoubleTask<K,V>
5639 (this, batch >>>= 1, baseLimit = h, f, tab,
5640 rights, transformer, r, reducer)).fork();
5641 }
// Sequentially reduce this task's own portion (whole nodes as entries).
5642 for (Node<K,V> p; (p = advance()) != null; )
5643 r = reducer.apply(r, transformer.apply(p));
5644 result = r;
// Propagate completion: fold finished right subtasks into parents.
5645 CountedCompleter<?> c;
5646 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5647 @SuppressWarnings("unchecked") MapReduceEntriesToDoubleTask<K,V>
5648 t = (MapReduceEntriesToDoubleTask<K,V>)c,
5649 s = t.rights;
5650 while (s != null) {
5651 t.result = reducer.apply(t.result, s.result);
5652 s = t.rights = s.nextRight;
5653 }
5654 }
5655 }
5656 }
5657 }
5658
// CountedCompleter implementing reduceToDouble: applies `transformer`
// to every (key, value) pair and folds the results with `reducer`,
// starting from `basis`.
5659 @SuppressWarnings("serial")
5660 static final class MapReduceMappingsToDoubleTask<K,V>
5661 extends BulkTask<K,V,Double> {
5662 final ObjectByObjectToDouble<? super K, ? super V> transformer;
5663 final DoubleByDoubleToDouble reducer;
5664 final double basis;
5665 double result;
5666 MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
5667 MapReduceMappingsToDoubleTask
5668 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5669 MapReduceMappingsToDoubleTask<K,V> nextRight,
5670 ObjectByObjectToDouble<? super K, ? super V> transformer,
5671 double basis,
5672 DoubleByDoubleToDouble reducer) {
5673 super(p, b, i, f, t); this.nextRight = nextRight;
5674 this.transformer = transformer;
5675 this.basis = basis; this.reducer = reducer;
5676 }
5677 public final Double getRawResult() { return result; }
5678 public final void compute() {
5679 final ObjectByObjectToDouble<? super K, ? super V> transformer;
5680 final DoubleByDoubleToDouble reducer;
5681 if ((transformer = this.transformer) != null &&
5682 (reducer = this.reducer) != null) {
5683 double r = this.basis;
// Fork right-half subtasks while the batch budget remains.
5684 for (int i = baseIndex, f, h; batch > 0 &&
5685 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5686 addToPendingCount(1);
5687 (rights = new MapReduceMappingsToDoubleTask<K,V>
5688 (this, batch >>>= 1, baseLimit = h, f, tab,
5689 rights, transformer, r, reducer)).fork();
5690 }
// Sequentially reduce this task's own portion (key/value pairs).
5691 for (Node<K,V> p; (p = advance()) != null; )
5692 r = reducer.apply(r, transformer.apply(p.key, p.val));
5693 result = r;
// Propagate completion: fold finished right subtasks into parents.
5694 CountedCompleter<?> c;
5695 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5696 @SuppressWarnings("unchecked") MapReduceMappingsToDoubleTask<K,V>
5697 t = (MapReduceMappingsToDoubleTask<K,V>)c,
5698 s = t.rights;
5699 while (s != null) {
5700 t.result = reducer.apply(t.result, s.result);
5701 s = t.rights = s.nextRight;
5702 }
5703 }
5704 }
5705 }
5706 }
5707
// CountedCompleter implementing reduceKeysToLong: applies
// `transformer` to every key and folds the results with `reducer`,
// starting from `basis`.
5708 @SuppressWarnings("serial")
5709 static final class MapReduceKeysToLongTask<K,V>
5710 extends BulkTask<K,V,Long> {
5711 final ObjectToLong<? super K> transformer;
5712 final LongByLongToLong reducer;
5713 final long basis;
5714 long result;
5715 MapReduceKeysToLongTask<K,V> rights, nextRight;
5716 MapReduceKeysToLongTask
5717 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5718 MapReduceKeysToLongTask<K,V> nextRight,
5719 ObjectToLong<? super K> transformer,
5720 long basis,
5721 LongByLongToLong reducer) {
5722 super(p, b, i, f, t); this.nextRight = nextRight;
5723 this.transformer = transformer;
5724 this.basis = basis; this.reducer = reducer;
5725 }
5726 public final Long getRawResult() { return result; }
5727 public final void compute() {
5728 final ObjectToLong<? super K> transformer;
5729 final LongByLongToLong reducer;
5730 if ((transformer = this.transformer) != null &&
5731 (reducer = this.reducer) != null) {
5732 long r = this.basis;
// Fork right-half subtasks while the batch budget remains.
5733 for (int i = baseIndex, f, h; batch > 0 &&
5734 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5735 addToPendingCount(1);
5736 (rights = new MapReduceKeysToLongTask<K,V>
5737 (this, batch >>>= 1, baseLimit = h, f, tab,
5738 rights, transformer, r, reducer)).fork();
5739 }
// Sequentially reduce this task's own portion (keys).
5740 for (Node<K,V> p; (p = advance()) != null; )
5741 r = reducer.apply(r, transformer.apply(p.key));
5742 result = r;
// Propagate completion: fold finished right subtasks into parents.
5743 CountedCompleter<?> c;
5744 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5745 @SuppressWarnings("unchecked") MapReduceKeysToLongTask<K,V>
5746 t = (MapReduceKeysToLongTask<K,V>)c,
5747 s = t.rights;
5748 while (s != null) {
5749 t.result = reducer.apply(t.result, s.result);
5750 s = t.rights = s.nextRight;
5751 }
5752 }
5753 }
5754 }
5755 }
5756
// CountedCompleter implementing reduceValuesToLong: applies
// `transformer` to every value and folds the results with `reducer`,
// starting from `basis`.
5757 @SuppressWarnings("serial")
5758 static final class MapReduceValuesToLongTask<K,V>
5759 extends BulkTask<K,V,Long> {
5760 final ObjectToLong<? super V> transformer;
5761 final LongByLongToLong reducer;
5762 final long basis;
5763 long result;
5764 MapReduceValuesToLongTask<K,V> rights, nextRight;
5765 MapReduceValuesToLongTask
5766 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5767 MapReduceValuesToLongTask<K,V> nextRight,
5768 ObjectToLong<? super V> transformer,
5769 long basis,
5770 LongByLongToLong reducer) {
5771 super(p, b, i, f, t); this.nextRight = nextRight;
5772 this.transformer = transformer;
5773 this.basis = basis; this.reducer = reducer;
5774 }
5775 public final Long getRawResult() { return result; }
5776 public final void compute() {
5777 final ObjectToLong<? super V> transformer;
5778 final LongByLongToLong reducer;
5779 if ((transformer = this.transformer) != null &&
5780 (reducer = this.reducer) != null) {
5781 long r = this.basis;
// Fork right-half subtasks while the batch budget remains.
5782 for (int i = baseIndex, f, h; batch > 0 &&
5783 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5784 addToPendingCount(1);
5785 (rights = new MapReduceValuesToLongTask<K,V>
5786 (this, batch >>>= 1, baseLimit = h, f, tab,
5787 rights, transformer, r, reducer)).fork();
5788 }
// Sequentially reduce this task's own portion (values).
5789 for (Node<K,V> p; (p = advance()) != null; )
5790 r = reducer.apply(r, transformer.apply(p.val));
5791 result = r;
// Propagate completion: fold finished right subtasks into parents.
5792 CountedCompleter<?> c;
5793 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5794 @SuppressWarnings("unchecked") MapReduceValuesToLongTask<K,V>
5795 t = (MapReduceValuesToLongTask<K,V>)c,
5796 s = t.rights;
5797 while (s != null) {
5798 t.result = reducer.apply(t.result, s.result);
5799 s = t.rights = s.nextRight;
5800 }
5801 }
5802 }
5803 }
5804 }
5805
// CountedCompleter implementing reduceEntriesToLong: applies
// `transformer` to every entry and folds the results with `reducer`,
// starting from `basis`.
5806 @SuppressWarnings("serial")
5807 static final class MapReduceEntriesToLongTask<K,V>
5808 extends BulkTask<K,V,Long> {
5809 final ObjectToLong<Map.Entry<K,V>> transformer;
5810 final LongByLongToLong reducer;
5811 final long basis;
5812 long result;
5813 MapReduceEntriesToLongTask<K,V> rights, nextRight;
5814 MapReduceEntriesToLongTask
5815 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5816 MapReduceEntriesToLongTask<K,V> nextRight,
5817 ObjectToLong<Map.Entry<K,V>> transformer,
5818 long basis,
5819 LongByLongToLong reducer) {
5820 super(p, b, i, f, t); this.nextRight = nextRight;
5821 this.transformer = transformer;
5822 this.basis = basis; this.reducer = reducer;
5823 }
5824 public final Long getRawResult() { return result; }
5825 public final void compute() {
5826 final ObjectToLong<Map.Entry<K,V>> transformer;
5827 final LongByLongToLong reducer;
5828 if ((transformer = this.transformer) != null &&
5829 (reducer = this.reducer) != null) {
5830 long r = this.basis;
// Fork right-half subtasks while the batch budget remains.
5831 for (int i = baseIndex, f, h; batch > 0 &&
5832 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5833 addToPendingCount(1);
5834 (rights = new MapReduceEntriesToLongTask<K,V>
5835 (this, batch >>>= 1, baseLimit = h, f, tab,
5836 rights, transformer, r, reducer)).fork();
5837 }
// Sequentially reduce this task's own portion (nodes as entries).
5838 for (Node<K,V> p; (p = advance()) != null; )
5839 r = reducer.apply(r, transformer.apply(p));
5840 result = r;
// Propagate completion: fold finished right subtasks into parents.
5841 CountedCompleter<?> c;
5842 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5843 @SuppressWarnings("unchecked") MapReduceEntriesToLongTask<K,V>
5844 t = (MapReduceEntriesToLongTask<K,V>)c,
5845 s = t.rights;
5846 while (s != null) {
5847 t.result = reducer.apply(t.result, s.result);
5848 s = t.rights = s.nextRight;
5849 }
5850 }
5851 }
5852 }
5853 }
5854
// CountedCompleter implementing reduceToLong: applies `transformer`
// to every (key, value) pair and folds the results with `reducer`,
// starting from `basis`.
5855 @SuppressWarnings("serial")
5856 static final class MapReduceMappingsToLongTask<K,V>
5857 extends BulkTask<K,V,Long> {
5858 final ObjectByObjectToLong<? super K, ? super V> transformer;
5859 final LongByLongToLong reducer;
5860 final long basis;
5861 long result;
5862 MapReduceMappingsToLongTask<K,V> rights, nextRight;
5863 MapReduceMappingsToLongTask
5864 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5865 MapReduceMappingsToLongTask<K,V> nextRight,
5866 ObjectByObjectToLong<? super K, ? super V> transformer,
5867 long basis,
5868 LongByLongToLong reducer) {
5869 super(p, b, i, f, t); this.nextRight = nextRight;
5870 this.transformer = transformer;
5871 this.basis = basis; this.reducer = reducer;
5872 }
5873 public final Long getRawResult() { return result; }
5874 public final void compute() {
5875 final ObjectByObjectToLong<? super K, ? super V> transformer;
5876 final LongByLongToLong reducer;
5877 if ((transformer = this.transformer) != null &&
5878 (reducer = this.reducer) != null) {
5879 long r = this.basis;
// Fork right-half subtasks while the batch budget remains.
5880 for (int i = baseIndex, f, h; batch > 0 &&
5881 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5882 addToPendingCount(1);
5883 (rights = new MapReduceMappingsToLongTask<K,V>
5884 (this, batch >>>= 1, baseLimit = h, f, tab,
5885 rights, transformer, r, reducer)).fork();
5886 }
// Sequentially reduce this task's own portion (key/value pairs).
5887 for (Node<K,V> p; (p = advance()) != null; )
5888 r = reducer.apply(r, transformer.apply(p.key, p.val));
5889 result = r;
// Propagate completion: fold finished right subtasks into parents.
5890 CountedCompleter<?> c;
5891 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5892 @SuppressWarnings("unchecked") MapReduceMappingsToLongTask<K,V>
5893 t = (MapReduceMappingsToLongTask<K,V>)c,
5894 s = t.rights;
5895 while (s != null) {
5896 t.result = reducer.apply(t.result, s.result);
5897 s = t.rights = s.nextRight;
5898 }
5899 }
5900 }
5901 }
5902 }
5903
// CountedCompleter implementing reduceKeysToInt: applies
// `transformer` to every key and folds the results with `reducer`,
// starting from `basis`.
5904 @SuppressWarnings("serial")
5905 static final class MapReduceKeysToIntTask<K,V>
5906 extends BulkTask<K,V,Integer> {
5907 final ObjectToInt<? super K> transformer;
5908 final IntByIntToInt reducer;
5909 final int basis;
5910 int result;
5911 MapReduceKeysToIntTask<K,V> rights, nextRight;
5912 MapReduceKeysToIntTask
5913 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5914 MapReduceKeysToIntTask<K,V> nextRight,
5915 ObjectToInt<? super K> transformer,
5916 int basis,
5917 IntByIntToInt reducer) {
5918 super(p, b, i, f, t); this.nextRight = nextRight;
5919 this.transformer = transformer;
5920 this.basis = basis; this.reducer = reducer;
5921 }
5922 public final Integer getRawResult() { return result; }
5923 public final void compute() {
5924 final ObjectToInt<? super K> transformer;
5925 final IntByIntToInt reducer;
5926 if ((transformer = this.transformer) != null &&
5927 (reducer = this.reducer) != null) {
5928 int r = this.basis;
// Fork right-half subtasks while the batch budget remains.
5929 for (int i = baseIndex, f, h; batch > 0 &&
5930 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5931 addToPendingCount(1);
5932 (rights = new MapReduceKeysToIntTask<K,V>
5933 (this, batch >>>= 1, baseLimit = h, f, tab,
5934 rights, transformer, r, reducer)).fork();
5935 }
// Sequentially reduce this task's own portion (keys).
5936 for (Node<K,V> p; (p = advance()) != null; )
5937 r = reducer.apply(r, transformer.apply(p.key));
5938 result = r;
// Propagate completion: fold finished right subtasks into parents.
5939 CountedCompleter<?> c;
5940 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5941 @SuppressWarnings("unchecked") MapReduceKeysToIntTask<K,V>
5942 t = (MapReduceKeysToIntTask<K,V>)c,
5943 s = t.rights;
5944 while (s != null) {
5945 t.result = reducer.apply(t.result, s.result);
5946 s = t.rights = s.nextRight;
5947 }
5948 }
5949 }
5950 }
5951 }
5952
// CountedCompleter implementing reduceValuesToInt: applies
// `transformer` to every value and folds the results with `reducer`,
// starting from `basis`.
5953 @SuppressWarnings("serial")
5954 static final class MapReduceValuesToIntTask<K,V>
5955 extends BulkTask<K,V,Integer> {
5956 final ObjectToInt<? super V> transformer;
5957 final IntByIntToInt reducer;
5958 final int basis;
5959 int result;
5960 MapReduceValuesToIntTask<K,V> rights, nextRight;
5961 MapReduceValuesToIntTask
5962 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5963 MapReduceValuesToIntTask<K,V> nextRight,
5964 ObjectToInt<? super V> transformer,
5965 int basis,
5966 IntByIntToInt reducer) {
5967 super(p, b, i, f, t); this.nextRight = nextRight;
5968 this.transformer = transformer;
5969 this.basis = basis; this.reducer = reducer;
5970 }
5971 public final Integer getRawResult() { return result; }
5972 public final void compute() {
5973 final ObjectToInt<? super V> transformer;
5974 final IntByIntToInt reducer;
5975 if ((transformer = this.transformer) != null &&
5976 (reducer = this.reducer) != null) {
5977 int r = this.basis;
// Fork right-half subtasks while the batch budget remains.
5978 for (int i = baseIndex, f, h; batch > 0 &&
5979 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5980 addToPendingCount(1);
5981 (rights = new MapReduceValuesToIntTask<K,V>
5982 (this, batch >>>= 1, baseLimit = h, f, tab,
5983 rights, transformer, r, reducer)).fork();
5984 }
// Sequentially reduce this task's own portion (values).
5985 for (Node<K,V> p; (p = advance()) != null; )
5986 r = reducer.apply(r, transformer.apply(p.val));
5987 result = r;
// Propagate completion: fold finished right subtasks into parents.
5988 CountedCompleter<?> c;
5989 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5990 @SuppressWarnings("unchecked") MapReduceValuesToIntTask<K,V>
5991 t = (MapReduceValuesToIntTask<K,V>)c,
5992 s = t.rights;
5993 while (s != null) {
5994 t.result = reducer.apply(t.result, s.result);
5995 s = t.rights = s.nextRight;
5996 }
5997 }
5998 }
5999 }
6000 }
6001
// CountedCompleter implementing reduceEntriesToInt: applies
// `transformer` to every entry and folds the results with `reducer`,
// starting from `basis`.
6002 @SuppressWarnings("serial")
6003 static final class MapReduceEntriesToIntTask<K,V>
6004 extends BulkTask<K,V,Integer> {
6005 final ObjectToInt<Map.Entry<K,V>> transformer;
6006 final IntByIntToInt reducer;
6007 final int basis;
6008 int result;
6009 MapReduceEntriesToIntTask<K,V> rights, nextRight;
6010 MapReduceEntriesToIntTask
6011 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6012 MapReduceEntriesToIntTask<K,V> nextRight,
6013 ObjectToInt<Map.Entry<K,V>> transformer,
6014 int basis,
6015 IntByIntToInt reducer) {
6016 super(p, b, i, f, t); this.nextRight = nextRight;
6017 this.transformer = transformer;
6018 this.basis = basis; this.reducer = reducer;
6019 }
6020 public final Integer getRawResult() { return result; }
6021 public final void compute() {
6022 final ObjectToInt<Map.Entry<K,V>> transformer;
6023 final IntByIntToInt reducer;
6024 if ((transformer = this.transformer) != null &&
6025 (reducer = this.reducer) != null) {
6026 int r = this.basis;
// Fork right-half subtasks while the batch budget remains.
6027 for (int i = baseIndex, f, h; batch > 0 &&
6028 (h = ((f = baseLimit) + i) >>> 1) > i;) {
6029 addToPendingCount(1);
6030 (rights = new MapReduceEntriesToIntTask<K,V>
6031 (this, batch >>>= 1, baseLimit = h, f, tab,
6032 rights, transformer, r, reducer)).fork();
6033 }
// Sequentially reduce this task's own portion (nodes as entries).
6034 for (Node<K,V> p; (p = advance()) != null; )
6035 r = reducer.apply(r, transformer.apply(p));
6036 result = r;
// Propagate completion: fold finished right subtasks into parents.
6037 CountedCompleter<?> c;
6038 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6039 @SuppressWarnings("unchecked") MapReduceEntriesToIntTask<K,V>
6040 t = (MapReduceEntriesToIntTask<K,V>)c,
6041 s = t.rights;
6042 while (s != null) {
6043 t.result = reducer.apply(t.result, s.result);
6044 s = t.rights = s.nextRight;
6045 }
6046 }
6047 }
6048 }
6049 }
6050
// CountedCompleter implementing reduceToInt: applies `transformer`
// to every (key, value) pair and folds the results with `reducer`,
// starting from `basis`.
6051 @SuppressWarnings("serial")
6052 static final class MapReduceMappingsToIntTask<K,V>
6053 extends BulkTask<K,V,Integer> {
6054 final ObjectByObjectToInt<? super K, ? super V> transformer;
6055 final IntByIntToInt reducer;
6056 final int basis;
6057 int result;
6058 MapReduceMappingsToIntTask<K,V> rights, nextRight;
6059 MapReduceMappingsToIntTask
6060 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6061 MapReduceMappingsToIntTask<K,V> nextRight,
6062 ObjectByObjectToInt<? super K, ? super V> transformer,
6063 int basis,
6064 IntByIntToInt reducer) {
6065 super(p, b, i, f, t); this.nextRight = nextRight;
6066 this.transformer = transformer;
6067 this.basis = basis; this.reducer = reducer;
6068 }
6069 public final Integer getRawResult() { return result; }
6070 public final void compute() {
6071 final ObjectByObjectToInt<? super K, ? super V> transformer;
6072 final IntByIntToInt reducer;
6073 if ((transformer = this.transformer) != null &&
6074 (reducer = this.reducer) != null) {
6075 int r = this.basis;
// Fork right-half subtasks while the batch budget remains.
6076 for (int i = baseIndex, f, h; batch > 0 &&
6077 (h = ((f = baseLimit) + i) >>> 1) > i;) {
6078 addToPendingCount(1);
6079 (rights = new MapReduceMappingsToIntTask<K,V>
6080 (this, batch >>>= 1, baseLimit = h, f, tab,
6081 rights, transformer, r, reducer)).fork();
6082 }
// Sequentially reduce this task's own portion (key/value pairs).
6083 for (Node<K,V> p; (p = advance()) != null; )
6084 r = reducer.apply(r, transformer.apply(p.key, p.val));
6085 result = r;
// Propagate completion: fold finished right subtasks into parents.
6086 CountedCompleter<?> c;
6087 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6088 @SuppressWarnings("unchecked") MapReduceMappingsToIntTask<K,V>
6089 t = (MapReduceMappingsToIntTask<K,V>)c,
6090 s = t.rights;
6091 while (s != null) {
6092 t.result = reducer.apply(t.result, s.result);
6093 s = t.rights = s.nextRight;
6094 }
6095 }
6096 }
6097 }
6098 }
6099
6100 /* ---------------- Counters -------------- */
6101
6102 // Adapted from LongAdder and Striped64.
6103 // See their internal docs for explanation.
6104
6105 // A padded cell for distributing counts
// The p0..p6 / q0..q6 fields are deliberate padding around `value`
// so that independently-updated cells land on different cache lines
// (avoids false sharing). Do not reorder or remove them.
6106 static final class CounterCell {
6107 volatile long p0, p1, p2, p3, p4, p5, p6;
6108 volatile long value;
6109 volatile long q0, q1, q2, q3, q4, q5, q6;
6110 CounterCell(long x) { value = x; }
6111 }
6112
6113 /**
6114 * Holder for the thread-local hash code determining which
6115 * CounterCell to use. The code is initialized via the
6116 * counterHashCodeGenerator, but may be moved upon collisions.
6117 */
6118 static final class CounterHashCode {
// Current probe value; rehashed in fullAddCount on CAS contention.
6119 int code;
6120 }
6121
6122 /**
6123 * Generates initial value for per-thread CounterHashCodes.
6124 */
6125 static final AtomicInteger counterHashCodeGenerator = new AtomicInteger();
6126
6127 /**
6128 * Increment for counterHashCodeGenerator. See class ThreadLocal
6129 * for explanation.
6130 */
// NOTE(review): 0x61c88647 appears to be the 32-bit golden-ratio
// increment used for well-spread probe sequences (same constant as
// ThreadLocal) — see the ThreadLocal reference cited above.
6131 static final int SEED_INCREMENT = 0x61c88647;
6132
6133 /**
6134 * Per-thread counter hash codes. Shared across all instances.
6135 */
6136 static final ThreadLocal<CounterHashCode> threadCounterHashCode =
6137 new ThreadLocal<CounterHashCode>();
6138
6139
6140 final long sumCount() {
6141 CounterCell[] as = counterCells; CounterCell a;
6142 long sum = baseCount;
6143 if (as != null) {
6144 for (int i = 0; i < as.length; ++i) {
6145 if ((a = as[i]) != null)
6146 sum += a.value;
6147 }
6148 }
6149 return sum;
6150 }
6151
6152 // See LongAdder version for explanation
// Slow path for addCount when a simple CAS on baseCount or a cell
// failed. Initializes/expands the counterCells table and retries,
// rehashing the per-thread probe on contention. `cellsBusy` acts as
// a spinlock guarding table creation/resize.
6153 private final void fullAddCount(long x, CounterHashCode hc,
6154 boolean wasUncontended) {
6155 int h;
// Lazily create this thread's probe value if absent.
6156 if (hc == null) {
6157 hc = new CounterHashCode();
6158 int s = counterHashCodeGenerator.addAndGet(SEED_INCREMENT);
6159 h = hc.code = (s == 0) ? 1 : s; // Avoid zero
6160 threadCounterHashCode.set(hc);
6161 }
6162 else
6163 h = hc.code;
6164 boolean collide = false; // True if last slot nonempty
6165 for (;;) {
6166 CounterCell[] as; CounterCell a; int n; long v;
6167 if ((as = counterCells) != null && (n = as.length) > 0) {
6168 if ((a = as[(n - 1) & h]) == null) {
6169 if (cellsBusy == 0) { // Try to attach new Cell
6170 CounterCell r = new CounterCell(x); // Optimistic create
6171 if (cellsBusy == 0 &&
6172 U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
6173 boolean created = false;
6174 try { // Recheck under lock
6175 CounterCell[] rs; int m, j;
6176 if ((rs = counterCells) != null &&
6177 (m = rs.length) > 0 &&
6178 rs[j = (m - 1) & h] == null) {
6179 rs[j] = r;
6180 created = true;
6181 }
6182 } finally {
6183 cellsBusy = 0;
6184 }
6185 if (created)
6186 break;
6187 continue; // Slot is now non-empty
6188 }
6189 }
6190 collide = false;
6191 }
6192 else if (!wasUncontended) // CAS already known to fail
6193 wasUncontended = true; // Continue after rehash
// Try to add directly into the existing (nonempty) cell.
6194 else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
6195 break;
6196 else if (counterCells != as || n >= NCPU)
6197 collide = false; // At max size or stale
6198 else if (!collide)
6199 collide = true;
// Repeated collisions on the same cell: double the table.
6200 else if (cellsBusy == 0 &&
6201 U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
6202 try {
6203 if (counterCells == as) {// Expand table unless stale
6204 CounterCell[] rs = new CounterCell[n << 1];
6205 for (int i = 0; i < n; ++i)
6206 rs[i] = as[i];
6207 counterCells = rs;
6208 }
6209 } finally {
6210 cellsBusy = 0;
6211 }
6212 collide = false;
6213 continue; // Retry with expanded table
6214 }
// XorShift rehash to pick a different cell next iteration.
6215 h ^= h << 13; // Rehash
6216 h ^= h >>> 17;
6217 h ^= h << 5;
6218 }
// No table yet: try to create the initial 2-cell table.
6219 else if (cellsBusy == 0 && counterCells == as &&
6220 U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
6221 boolean init = false;
6222 try { // Initialize table
6223 if (counterCells == as) {
6224 CounterCell[] rs = new CounterCell[2];
6225 rs[h & 1] = new CounterCell(x);
6226 counterCells = rs;
6227 init = true;
6228 }
6229 } finally {
6230 cellsBusy = 0;
6231 }
6232 if (init)
6233 break;
6234 }
// Table creation lost the race: fall back to CASing baseCount.
6235 else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
6236 break; // Fall back on using base
6237 }
6238 hc.code = h; // Record index for next time
6239 }
6240
6241 // Unsafe mechanics
// Field offsets (resolved in the static initializer below) used for
// CAS/volatile access to sizeCtl, transferIndex, baseCount,
// cellsBusy, CounterCell.value, and for Node[] element addressing
// (ABASE + (i << ASHIFT)).
6242 private static final sun.misc.Unsafe U;
6243 private static final long SIZECTL;
6244 private static final long TRANSFERINDEX;
6245 private static final long BASECOUNT;
6246 private static final long CELLSBUSY;
6247 private static final long CELLVALUE;
6248 private static final long ABASE;
6249 private static final int ASHIFT;
6250
// Resolve Unsafe field offsets and the Node[] addressing constants.
// Any reflection failure is fatal: the class cannot function
// without these offsets, so it is rethrown as an Error.
6251 static {
6252 try {
6253 U = getUnsafe();
6254 Class<?> k = ConcurrentHashMapV8.class;
6255 SIZECTL = U.objectFieldOffset
6256 (k.getDeclaredField("sizeCtl"));
6257 TRANSFERINDEX = U.objectFieldOffset
6258 (k.getDeclaredField("transferIndex"));
6259 BASECOUNT = U.objectFieldOffset
6260 (k.getDeclaredField("baseCount"));
6261 CELLSBUSY = U.objectFieldOffset
6262 (k.getDeclaredField("cellsBusy"));
6263 Class<?> ck = CounterCell.class;
6264 CELLVALUE = U.objectFieldOffset
6265 (ck.getDeclaredField("value"));
6266 Class<?> ak = Node[].class;
6267 ABASE = U.arrayBaseOffset(ak);
6268 int scale = U.arrayIndexScale(ak);
// Element size must be a power of two so index -> offset can be a shift.
6269 if ((scale & (scale - 1)) != 0)
6270 throw new Error("data type scale not a power of two");
6271 ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
6272 } catch (Exception e) {
6273 throw new Error(e);
6274 }
6275 }
6276
6277 /**
6278 * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
6279 * Replace with a simple call to Unsafe.getUnsafe when integrating
6280 * into a jdk.
6281 *
6282 * @return a sun.misc.Unsafe
6283 */
6284 private static sun.misc.Unsafe getUnsafe() {
// First try the direct route, which works when this code is on the
// boot classpath; outside the JDK it throws SecurityException.
6285 try {
6286 return sun.misc.Unsafe.getUnsafe();
6287 } catch (SecurityException tryReflectionInstead) {}
// Fall back to privileged reflection: scan Unsafe's static fields
// for the singleton instance (works regardless of its field name).
6288 try {
6289 return java.security.AccessController.doPrivileged
6290 (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
6291 public sun.misc.Unsafe run() throws Exception {
6292 Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
6293 for (java.lang.reflect.Field f : k.getDeclaredFields()) {
6294 f.setAccessible(true);
6295 Object x = f.get(null);
6296 if (k.isInstance(x))
6297 return k.cast(x);
6298 }
6299 throw new NoSuchFieldError("the Unsafe");
6300 }});
6301 } catch (java.security.PrivilegedActionException e) {
6302 throw new RuntimeException("Could not initialize intrinsics",
6303 e.getCause());
6304 }
6305 }
6306 }