root/jsr166/jsr166/src/jsr166e/ConcurrentHashMapV8.java
Revision: 1.102
Committed: Wed Jun 19 14:55:40 2013 UTC (10 years, 10 months ago) by dl
Branch: MAIN
Changes since 1.101: +3728 -4410 lines
Log Message:
Sync with jdk8 versions

File Contents

# User Rev Content
1 dl 1.1 /*
2     * Written by Doug Lea with assistance from members of JCP JSR-166
3     * Expert Group and released to the public domain, as explained at
4     * http://creativecommons.org/publicdomain/zero/1.0/
5     */
6    
7     package jsr166e;
8 dl 1.71
9 dl 1.102 import jsr166e.ForkJoinPool;
10    
11     import java.io.ObjectStreamField;
12     import java.io.Serializable;
13     import java.lang.reflect.ParameterizedType;
14     import java.lang.reflect.Type;
15 dl 1.24 import java.util.Arrays;
16 dl 1.1 import java.util.Collection;
17 dl 1.102 import java.util.Comparator;
18     import java.util.ConcurrentModificationException;
19     import java.util.Enumeration;
20     import java.util.HashMap;
21 dl 1.1 import java.util.Hashtable;
22     import java.util.Iterator;
23 dl 1.102 import java.util.Map;
24 dl 1.1 import java.util.NoSuchElementException;
25 dl 1.102 import java.util.Set;
26 dl 1.1 import java.util.concurrent.ConcurrentMap;
27 dl 1.102 import java.util.concurrent.atomic.AtomicReference;
28 dl 1.82 import java.util.concurrent.atomic.AtomicInteger;
29 dl 1.102 import java.util.concurrent.locks.LockSupport;
30     import java.util.concurrent.locks.ReentrantLock;
31 dl 1.79
32 dl 1.1 /**
33     * A hash table supporting full concurrency of retrievals and
34     * high expected concurrency for updates. This class obeys the
35     * same functional specification as {@link java.util.Hashtable}, and
36     * includes versions of methods corresponding to each method of
37     * {@code Hashtable}. However, even though all operations are
38     * thread-safe, retrieval operations do <em>not</em> entail locking,
39     * and there is <em>not</em> any support for locking the entire table
40     * in a way that prevents all access. This class is fully
41     * interoperable with {@code Hashtable} in programs that rely on its
42     * thread safety but not on its synchronization details.
43     *
44 jsr166 1.78 * <p>Retrieval operations (including {@code get}) generally do not
45 dl 1.1 * block, so may overlap with update operations (including {@code put}
46     * and {@code remove}). Retrievals reflect the results of the most
47     * recently <em>completed</em> update operations holding upon their
48 dl 1.59 * onset. (More formally, an update operation for a given key bears a
49     * <em>happens-before</em> relation with any (non-null) retrieval for
50     * that key reporting the updated value.) For aggregate operations
51     * such as {@code putAll} and {@code clear}, concurrent retrievals may
52     * reflect insertion or removal of only some entries. Similarly,
53     * Iterators and Enumerations return elements reflecting the state of
54     * the hash table at some point at or since the creation of the
55     * iterator/enumeration. They do <em>not</em> throw {@link
56     * ConcurrentModificationException}. However, iterators are designed
57     * to be used by only one thread at a time. Bear in mind that the
58     * results of aggregate status methods including {@code size}, {@code
59     * isEmpty}, and {@code containsValue} are typically useful only when
60     * a map is not undergoing concurrent updates in other threads.
61     * Otherwise the results of these methods reflect transient states
62     * that may be adequate for monitoring or estimation purposes, but not
63     * for program control.
64 dl 1.1 *
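 * <p>For example (an illustrative sketch of the visibility guarantee
 * above; {@code Config} and its {@code timeoutMs} field are hypothetical
 * user-defined names, not part of this class):
 *
 * <pre> {@code
 * // Thread 1: construct an object, then publish it via put
 * map.put("cfg", new Config(500));
 *
 * // Thread 2: a non-null get happens-after the put that stored the value,
 * // so writes made while constructing the Config are visible here
 * Config c = map.get("cfg");
 * if (c != null)
 *     System.out.println(c.timeoutMs);}</pre>
 *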
65 jsr166 1.78 * <p>The table is dynamically expanded when there are too many
66 dl 1.16 * collisions (i.e., keys that have distinct hash codes but fall into
67     * the same slot modulo the table size), with the expected average
68 dl 1.24 * effect of maintaining roughly two bins per mapping (corresponding
69     * to a 0.75 load factor threshold for resizing). There may be much
70     * variance around this average as mappings are added and removed, but
71     * overall, this maintains a commonly accepted time/space tradeoff for
72     * hash tables. However, resizing this or any other kind of hash
73     * table may be a relatively slow operation. When possible, it is a
74     * good idea to provide a size estimate as an optional {@code
75 dl 1.16 * initialCapacity} constructor argument. An additional optional
76     * {@code loadFactor} constructor argument provides a further means of
77     * customizing initial table capacity by specifying the table density
78     * to be used in calculating the amount of space to allocate for the
79     * given number of elements. Also, for compatibility with previous
80     * versions of this class, constructors may optionally specify an
81     * expected {@code concurrencyLevel} as an additional hint for
82     * internal sizing. Note that using many keys with exactly the same
83 jsr166 1.31 * {@code hashCode()} is a sure way to slow down performance of any
84 dl 1.102 * hash table. To ameliorate impact, when keys are {@link Comparable},
85     * this class may use comparison order among keys to help break ties.
86 dl 1.1 *
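 * <p>For example (an illustrative sketch), presizing for roughly one
 * million mappings avoids intermediate resizes:
 *
 * <pre> {@code
 * ConcurrentHashMapV8<String,String> m =
 *     new ConcurrentHashMapV8<String,String>(1000000);}</pre>
 *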
87 jsr166 1.78 * <p>A {@link Set} projection of a ConcurrentHashMapV8 may be created
88 dl 1.70 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
89     * (using {@link #keySet(Object)} when only keys are of interest, and the
90     * mapped values are (perhaps transiently) not used or all take the
91     * same mapping value).
92     *
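 * <p>For example (an illustrative sketch, assuming the static factory
 * behaves as described above), using the map purely as a concurrent set:
 *
 * <pre> {@code
 * Set<String> seen = ConcurrentHashMapV8.<String>newKeySet();
 * if (seen.add("request-42")) {
 *     // first time this element has been observed
 * }}</pre>
 *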
93 dl 1.1 * <p>This class and its views and iterators implement all of the
94     * <em>optional</em> methods of the {@link Map} and {@link Iterator}
95     * interfaces.
96     *
97 jsr166 1.78 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
98 dl 1.1 * does <em>not</em> allow {@code null} to be used as a key or value.
99     *
100 dl 1.102 * <p>ConcurrentHashMapV8s support a set of sequential and parallel bulk
101     * operations that are designed
102     * to be safely, and often sensibly, applied even with maps that are
103     * being concurrently updated by other threads; for example, when
104     * computing a snapshot summary of the values in a shared registry.
105     * There are three kinds of operation, each with four forms, accepting
106     * functions with Keys, Values, Entries, and (Key, Value) arguments
107     * and/or return values. Because the elements of a ConcurrentHashMapV8
108     * are not ordered in any particular way, and may be processed in
109     * different orders in different parallel executions, the correctness
110     * of supplied functions should not depend on any ordering, or on any
111     * other objects or values that may transiently change while
112     * computation is in progress; and except for forEach actions, should
113     * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry}
114     * objects do not support method {@code setValue}.
115 dl 1.70 *
116     * <ul>
117     * <li> forEach: Perform a given action on each element.
118     * A variant form applies a given transformation on each element
119     * before performing the action.</li>
120     *
121     * <li> search: Return the first available non-null result of
122     * applying a given function on each element; skipping further
123     * search when a result is found.</li>
124     *
125     * <li> reduce: Accumulate each element. The supplied reduction
126     * function cannot rely on ordering (more formally, it should be
127     * both associative and commutative). There are five variants:
128     *
129     * <ul>
130     *
131     * <li> Plain reductions. (There is not a form of this method for
132     * (key, value) function arguments since there is no corresponding
133     * return type.)</li>
134     *
135     * <li> Mapped reductions that accumulate the results of a given
136     * function applied to each element.</li>
137     *
138     * <li> Reductions to scalar doubles, longs, and ints, using a
139     * given basis value.</li>
140     *
141 dl 1.102 * </ul>
142 dl 1.70 * </li>
143     * </ul>
144 dl 1.102 *
145     * <p>These bulk operations accept a {@code parallelismThreshold}
146     * argument. Methods proceed sequentially if the current map size is
147     * estimated to be less than the given threshold. Using a value of
148     * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value
149     * of {@code 1} results in maximal parallelism by partitioning into
150     * enough subtasks to fully utilize the {@link
151     * ForkJoinPool#commonPool()} that is used for all parallel
152     * computations. Normally, you would initially choose one of these
153     * extreme values, and then measure the performance of in-between
154     * values that trade off overhead versus throughput.
155 dl 1.70 *
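 * <p>For example, given a {@code ConcurrentHashMapV8<String,Long> map},
 * a parallel sum of all values might look like the sketch below (the
 * method name follows the reduction forms described above and is shown
 * for illustration only):
 *
 * <pre> {@code
 * long sum = map.reduceValuesToLong(1L,      // 1 requests maximal parallelism
 *     new ObjectToLong<Long>() {
 *         public long apply(Long v) { return v.longValue(); }
 *     },
 *     0L,                                    // identity basis for a sum
 *     new LongByLongToLong() {
 *         public long apply(long a, long b) { return a + b; }
 *     });}</pre>
 *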
156     * <p>The concurrency properties of bulk operations follow
157     * from those of ConcurrentHashMapV8: Any non-null result returned
158     * from {@code get(key)} and related access methods bears a
159     * happens-before relation with the associated insertion or
160     * update. The result of any bulk operation reflects the
161     * composition of these per-element relations (but is not
162     * necessarily atomic with respect to the map as a whole unless it
163     * is somehow known to be quiescent). Conversely, because keys
164     * and values in the map are never null, null serves as a reliable
165     * atomic indicator of the current lack of any result. To
166     * maintain this property, null serves as an implicit basis for
167     * all non-scalar reduction operations. For the double, long, and
168     * int versions, the basis should be one that, when combined with
169     * any other value, returns that other value (more formally, it
170     * should be the identity element for the reduction). Most common
171     * reductions have these properties; for example, computing a sum
172     * with basis 0 or a minimum with basis MAX_VALUE.
173     *
174     * <p>Search and transformation functions provided as arguments
175     * should similarly return null to indicate the lack of any result
176     * (in which case it is not used). In the case of mapped
177     * reductions, this also enables transformations to serve as
178     * filters, returning null (or, in the case of primitive
179     * specializations, the identity basis) if the element should not
180     * be combined. You can create compound transformations and
181     * filterings by composing them yourself under this "null means
182     * there is nothing there now" rule before using them in search or
183     * reduce operations.
184     *
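 * <p>For example (an illustrative sketch, again assuming a {@code
 * ConcurrentHashMapV8<String,Long> map}; the {@code search} form shown
 * is illustrative), a search function returns null to mean "no result
 * here, keep looking":
 *
 * <pre> {@code
 * String over = map.search(Long.MAX_VALUE,   // Long.MAX_VALUE: sequential
 *     new BiFun<String,Long,String>() {
 *         public String apply(String k, Long v) {
 *             return (v.longValue() > 100L) ? k : null;
 *         }
 *     });}</pre>
 *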
185     * <p>Methods accepting and/or returning Entry arguments maintain
186     * key-value associations. They may be useful for example when
187     * finding the key for the greatest value. Note that "plain" Entry
188     * arguments can be supplied using {@code new
189     * AbstractMap.SimpleEntry(k,v)}.
190     *
191 jsr166 1.78 * <p>Bulk operations may complete abruptly, throwing an
192 dl 1.70 * exception encountered in the application of a supplied
193     * function. Bear in mind when handling such exceptions that other
194     * concurrently executing functions could also have thrown
195     * exceptions, or would have done so if the first exception had
196     * not occurred.
197     *
198 dl 1.84 * <p>Speedups for parallel compared to sequential forms are common
199     * but not guaranteed. Parallel operations involving brief functions
200     * on small maps may execute more slowly than sequential forms if the
201     * underlying work to parallelize the computation is more expensive
202     * than the computation itself. Similarly, parallelization may not
203     * lead to much actual parallelism if all processors are busy
204     * performing unrelated tasks.
205 dl 1.70 *
206 jsr166 1.78 * <p>All arguments to all task methods must be non-null.
207 dl 1.70 *
208     * <p><em>jsr166e note: During transition, this class
209     * uses nested functional interfaces with different names but the
210 jsr166 1.77 * same forms as those expected for JDK8.</em>
211 dl 1.70 *
212 dl 1.1 * <p>This class is a member of the
213     * <a href="{@docRoot}/../technotes/guides/collections/index.html">
214     * Java Collections Framework</a>.
215     *
216 jsr166 1.22 * @since 1.5
217 dl 1.1 * @author Doug Lea
218     * @param <K> the type of keys maintained by this map
219     * @param <V> the type of mapped values
220     */
221 jsr166 1.99 public class ConcurrentHashMapV8<K,V>
222     implements ConcurrentMap<K,V>, Serializable {
223 dl 1.1 private static final long serialVersionUID = 7249069246763182397L;
224    
225     /**
226 dl 1.102 * An object for traversing and partitioning elements of a source.
227     * This interface provides a subset of the functionality of JDK8
228     * java.util.Spliterator.
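     *
     * <p>A minimal usage sketch (assuming a spliterator obtained from
     * one of this map's traversal utilities):
     *
     * <pre> {@code
     * static <T> void drain(ConcurrentHashMapSpliterator<T> s, Action<T> action) {
     *     ConcurrentHashMapSpliterator<T> other = s.trySplit(); // null if too small
     *     if (other != null)
     *         other.forEachRemaining(action);  // traverse the split-off half
     *     while (s.tryAdvance(action)) { }     // then whatever remains here
     * }}</pre>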
229     */
230     public static interface ConcurrentHashMapSpliterator<T> {
231     /**
232     * If possible, returns a new spliterator covering
233     * approximately one half of the elements, which will not be
234     * covered by this spliterator. Returns null if it cannot be
235     * split.
236     */
237     ConcurrentHashMapSpliterator<T> trySplit();
238     /**
239     * Returns an estimate of the number of elements covered by
240     * this Spliterator.
241     */
242     long estimateSize();
243    
244     /** Applies the action to each untraversed element */
245     void forEachRemaining(Action<? super T> action);
246     /** If an element remains, applies the action and returns true. */
247     boolean tryAdvance(Action<? super T> action);
248 dl 1.41 }
249    
250 dl 1.102     // SAMs (single abstract method interfaces)
251     /** Interface describing a void action of one argument */
252     public interface Action<A> { void apply(A a); }
253     /** Interface describing a void action of two arguments */
254     public interface BiAction<A,B> { void apply(A a, B b); }
255     /** Interface describing a function of one argument */
256     public interface Fun<A,T> { T apply(A a); }
257     /** Interface describing a function of two arguments */
258     public interface BiFun<A,B,T> { T apply(A a, B b); }
259     /** Interface describing a function mapping its argument to a double */
260     public interface ObjectToDouble<A> { double apply(A a); }
261     /** Interface describing a function mapping its argument to a long */
262     public interface ObjectToLong<A> { long apply(A a); }
263     /** Interface describing a function mapping its argument to an int */
264     public interface ObjectToInt<A> { int apply(A a); }
265     /** Interface describing a function mapping two arguments to a double */
266     public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
267     /** Interface describing a function mapping two arguments to a long */
268     public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
269     /** Interface describing a function mapping two arguments to an int */
270     public interface ObjectByObjectToInt<A,B> { int apply(A a, B b); }
271     /** Interface describing a function mapping two doubles to a double */
272     public interface DoubleByDoubleToDouble { double apply(double a, double b); }
273     /** Interface describing a function mapping two longs to a long */
274     public interface LongByLongToLong { long apply(long a, long b); }
275     /** Interface describing a function mapping two ints to an int */
276     public interface IntByIntToInt { int apply(int a, int b); }
277    
278 dl 1.1 /*
279     * Overview:
280     *
281     * The primary design goal of this hash table is to maintain
282     * concurrent readability (typically method get(), but also
283     * iterators and related methods) while minimizing update
284 dl 1.24 * contention. Secondary goals are to keep space consumption about
285     * the same or better than java.util.HashMap, and to support high
286     * initial insertion rates on an empty table by many threads.
287 dl 1.1 *
288 dl 1.102 * This map usually acts as a binned (bucketed) hash table. Each
289     * key-value mapping is held in a Node. Most nodes are instances
290     * of the basic Node class with hash, key, value, and next
291     * fields. However, various subclasses exist: TreeNodes are
292     * arranged in balanced trees, not lists. TreeBins hold the roots
293     * of sets of TreeNodes. ForwardingNodes are placed at the heads
294     * of bins during resizing. ReservationNodes are used as
295     * placeholders while establishing values in computeIfAbsent and
296     * related methods. The types TreeBin, ForwardingNode, and
297     * ReservationNode do not hold normal user keys, values, or
298     * hashes, and are readily distinguishable during search etc
299     * because they have negative hash fields and null key and value
300     * fields. (These special nodes are either uncommon or transient,
301     * so the impact of carrying around some unused fields is
302     * insignificant.)
303 dl 1.1 *
304     * The table is lazily initialized to a power-of-two size upon the
305 dl 1.38 * first insertion. Each bin in the table normally contains a
306     * list of Nodes (most often, the list has only zero or one Node).
307     * Table accesses require volatile/atomic reads, writes, and
308     * CASes. Because there is no other way to arrange this without
309     * adding further indirections, we use intrinsics
310 dl 1.102 * (sun.misc.Unsafe) operations.
311 dl 1.24 *
312 dl 1.82 * We use the top (sign) bit of Node hash fields for control
313     * purposes -- it is available anyway because of addressing
314 dl 1.102 * constraints. Nodes with negative hash fields are specially
315     * handled or ignored in map methods.
316 dl 1.14 *
317 dl 1.27 * Insertion (via put or its variants) of the first node in an
318 dl 1.14 * empty bin is performed by just CASing it to the bin. This is
319 dl 1.38 * by far the most common case for put operations under most
320     * key/hash distributions. Other update operations (insert,
321     * delete, and replace) require locks. We do not want to waste
322     * the space required to associate a distinct lock object with
323     * each bin, so instead use the first node of a bin list itself as
324 dl 1.82 * a lock. Locking support for these locks relies on builtin
325     * "synchronized" monitors.
326 dl 1.24 *
327     * Using the first node of a list as a lock does not by itself
328     * suffice though: When a node is locked, any update must first
329     * validate that it is still the first node after locking it, and
330     * retry if not. Because new nodes are always appended to lists,
331     * once a node is first in a bin, it remains first until deleted
332 dl 1.102 * or the bin becomes invalidated (upon resizing).
333 dl 1.14 *
334 dl 1.24 * The main disadvantage of per-bin locks is that other update
335 dl 1.14 * operations on other nodes in a bin list protected by the same
336     * lock can stall, for example when user equals() or mapping
337 dl 1.38 * functions take a long time. However, statistically, under
338     * random hash codes, this is not a common problem. Ideally, the
339     * frequency of nodes in bins follows a Poisson distribution
340 dl 1.14 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
341 dl 1.16 * parameter of about 0.5 on average, given the resizing threshold
342     * of 0.75, although with a large variance because of resizing
343     * granularity. Ignoring variance, the expected occurrences of
344     * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
345 dl 1.38 * first values are:
346 dl 1.16 *
347 dl 1.38 * 0: 0.60653066
348     * 1: 0.30326533
349     * 2: 0.07581633
350     * 3: 0.01263606
351     * 4: 0.00157952
352     * 5: 0.00015795
353     * 6: 0.00001316
354     * 7: 0.00000094
355     * 8: 0.00000006
356     * more: less than 1 in ten million
357 dl 1.16 *
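     * These values can be reproduced with a few lines (an illustrative
     * sketch only, not part of the implementation):
     *
     *   double p = Math.exp(-0.5);             // k = 0 term of Poisson(0.5)
     *   for (int k = 0; k <= 8; ++k) {
     *       System.out.printf("%d: %.8f%n", k, p);
     *       p *= 0.5 / (k + 1);                // next term: p(k+1) = p(k)*0.5/(k+1)
     *   }
     *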
358     * Lock contention probability for two threads accessing distinct
359 dl 1.38 * elements is roughly 1 / (8 * #elements) under random hashes.
360     *
361     * Actual hash code distributions encountered in practice
362     * sometimes deviate significantly from uniform randomness. This
363     * includes the case when N > (1<<30), so some keys MUST collide.
364     * Similarly for dumb or hostile usages in which multiple keys are
365     * designed to have identical hash codes or ones that differ only
366     * in masked-out high bits. So we use a secondary strategy that
367     * applies when the number of nodes in a bin exceeds a
368     * threshold. These TreeBins use a balanced tree to hold nodes (a
369     * specialized form of red-black trees), bounding search time to
370     * O(log N). Each search step in a TreeBin is at least twice as
371 dl 1.38 * slow as in a regular list, but given that N cannot exceed
372     * (1<<64) (before running out of addresses) this bounds search
373     * steps, lock hold times, etc, to reasonable constants (roughly
374     * 100 nodes inspected per operation worst case) so long as keys
375     * are Comparable (which is very common -- String, Long, etc).
376     * TreeBin nodes (TreeNodes) also maintain the same "next"
377     * traversal pointers as regular nodes, so can be traversed in
378     * iterators in the same way.
379 dl 1.1 *
380 dl 1.38 * The table is resized when occupancy exceeds a percentage
381 dl 1.82 * threshold (nominally, 0.75, but see below). Any thread
382     * noticing an overfull bin may assist in resizing after the
383     * initiating thread allocates and sets up the replacement
384     * array. However, rather than stalling, these other threads may
385     * proceed with insertions etc. The use of TreeBins shields us
386     * from the worst case effects of overfilling while resizes are in
387     * progress. Resizing proceeds by transferring bins, one by one,
388     * from the table to the next table. To enable concurrency, the
389     * next table must be (incrementally) prefilled with place-holders
390     * serving as reverse forwarders to the old table. Because we are
391     * using power-of-two expansion, the elements from each bin must
392     * either stay at same index, or move with a power of two
393     * offset. We eliminate unnecessary node creation by catching
394     * cases where old nodes can be reused because their next fields
395     * won't change. On average, only about one-sixth of them need
396     * cloning when a table doubles. The nodes they replace will be
397     * garbage collectable as soon as they are no longer referenced by
398     * any reader thread that may be in the midst of concurrently
399     * traversing table. Upon transfer, the old table bin contains
400     * only a special forwarding node (with hash field "MOVED") that
401     * contains the next table as its key. On encountering a
402     * forwarding node, access and update operations restart, using
403     * the new table.
404     *
405     * Each bin transfer requires its bin lock, which can stall
406     * waiting for locks while resizing. However, because other
407     * threads can join in and help resize rather than contend for
408     * locks, average aggregate waits become shorter as resizing
409     * progresses. The transfer operation must also ensure that all
410     * accessible bins in both the old and new table are usable by any
411     * traversal. This is arranged by proceeding from the last bin
412     * (table.length - 1) up towards the first. Upon seeing a
413     * forwarding node, traversals (see class Traverser) arrange to
414     * move to the new table without revisiting nodes. However, to
415     * ensure that no intervening nodes are skipped, bin splitting can
416     * only begin after the associated reverse-forwarders are in
417     * place.
418 dl 1.14 *
419 dl 1.24 * The traversal scheme also applies to partial traversals of
420 dl 1.52 * ranges of bins (via an alternate Traverser constructor)
421 dl 1.41 * to support partitioned aggregate operations. Also, read-only
422     * operations give up if ever forwarded to a null table, which
423     * provides support for shutdown-style clearing, which is also not
424     * currently implemented.
425 dl 1.14 *
426     * Lazy table initialization minimizes footprint until first use,
427     * and also avoids resizings when the first operation is from a
428     * putAll, constructor with map argument, or deserialization.
429 dl 1.24 * These cases attempt to override the initial capacity settings,
430     * but harmlessly fail to take effect in cases of races.
431 dl 1.1 *
432 dl 1.82 * The element count is maintained using a specialization of
433     * LongAdder. We need to incorporate a specialization rather than
434     * just use a LongAdder in order to access implicit
435     * contention-sensing that leads to creation of multiple
436     * CounterCells. The counter mechanics avoid contention on
437     * updates but can encounter cache thrashing if read too
438     * frequently during concurrent access. To avoid reading so often,
439     * resizing under contention is attempted only upon adding to a
440     * bin already holding two or more nodes. Under uniform hash
441     * distributions, the probability of this occurring at threshold
442     * is around 13%, meaning that only about 1 in 8 puts check
443 dl 1.102 * threshold (and after resizing, many fewer do so).
444     *
445     * TreeBins use a special form of comparison for search and
446     * related operations (which is the main reason we cannot use
447     * existing collections such as TreeMaps). TreeBins contain
448     * Comparable elements, but may contain others, as well as
449     * elements that are Comparable but not necessarily Comparable
450     * for the same T, so we cannot invoke compareTo among them. To
451     * handle this, the tree is ordered primarily by hash value, then
452     * by Comparable.compareTo order if applicable. On lookup at a
453     * node, if elements are not comparable or compare as 0 then both
454     * left and right children may need to be searched in the case of
455     * tied hash values. (This corresponds to the full list search
456     * that would be necessary if all elements were non-Comparable and
457     * had tied hashes.) The red-black balancing code is updated from
458     * pre-jdk-collections
459     * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
460     * based in turn on Cormen, Leiserson, and Rivest "Introduction to
461     * Algorithms" (CLR).
462     *
463     * TreeBins also require an additional locking mechanism. While
464     * list traversal is always possible by readers even during
465     * updates, tree traversal is not, mainly because of tree-rotations
466     * that may change the root node and/or its linkages. TreeBins
467     * include a simple read-write lock mechanism parasitic on the
468     * main bin-synchronization strategy: Structural adjustments
469     * associated with an insertion or removal are already bin-locked
470     * (and so cannot conflict with other writers) but must wait for
471     * ongoing readers to finish. Since there can be only one such
472     * waiter, we use a simple scheme using a single "waiter" field to
473     * block writers. However, readers need never block. If the root
474     * lock is held, they proceed along the slow traversal path (via
475     * next-pointers) until the lock becomes available or the list is
476     * exhausted, whichever comes first. These cases are not fast, but
477     * maximize aggregate expected throughput.
478 dl 1.14 *
479     * Maintaining API and serialization compatibility with previous
480     * versions of this class introduces several oddities. Mainly: We
481     * leave untouched but unused constructor arguments referring to
482 dl 1.24 * concurrencyLevel. We accept a loadFactor constructor argument,
483     * but apply it only to initial table capacity (which is the only
484     * time that we can guarantee to honor it.) We also declare an
485     * unused "Segment" class that is instantiated in minimal form
486     * only when serializing.
487 dl 1.102 *
488     * This file is organized to make things a little easier to follow
489     * while reading than they might otherwise: First the main static
490     * declarations and utilities, then fields, then main public
491     * methods (with a few factorings of multiple public methods into
492     * internal ones), then sizing methods, trees, traversers, and
493     * bulk operations.
494 dl 1.1 */
495    
496     /* ---------------- Constants -------------- */
497    
498     /**
499 dl 1.16 * The largest possible table capacity. This value must be
500     * exactly 1<<30 to stay within Java array allocation and indexing
501 dl 1.24 * bounds for power of two table sizes, and is further required
502     * because the top two bits of 32bit hash fields are used for
503     * control purposes.
504 dl 1.1 */
505 dl 1.14 private static final int MAXIMUM_CAPACITY = 1 << 30;
506 dl 1.1
507     /**
508 dl 1.14 * The default initial table capacity. Must be a power of 2
509     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
510 dl 1.1 */
511 dl 1.14 private static final int DEFAULT_CAPACITY = 16;
512 dl 1.1
513     /**
514 dl 1.24 * The largest possible (non-power of two) array size.
515     * Needed by toArray and related methods.
516     */
517     static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
518    
519     /**
520     * The default concurrency level for this table. Unused but
521     * defined for compatibility with previous versions of this class.
522     */
523     private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
524    
525     /**
526 dl 1.16 * The load factor for this table. Overrides of this value in
527     * constructors affect only the initial table capacity. The
528 dl 1.24 * actual floating point value isn't normally used -- it is
529     * simpler to use expressions such as {@code n - (n >>> 2)} for
530     * the associated resizing threshold.
531 dl 1.1 */
532 dl 1.16 private static final float LOAD_FACTOR = 0.75f;
533 dl 1.1
534     /**
535 dl 1.38 * The bin count threshold for using a tree rather than list for a
536 dl 1.102 * bin. Bins are converted to trees when adding an element to a
537     * bin with at least this many nodes. The value must be greater
538     * than 2, and should be at least 8 to mesh with assumptions in
539     * tree removal about conversion back to plain bins upon
540     * shrinkage.
541     */
542     static final int TREEIFY_THRESHOLD = 8;
543    
544     /**
545     * The bin count threshold for untreeifying a (split) bin during a
546     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
547     * most 6 to mesh with shrinkage detection under removal.
548     */
549     static final int UNTREEIFY_THRESHOLD = 6;
550    
551     /**
552     * The smallest table capacity for which bins may be treeified.
553     * (Otherwise the table is resized if too many nodes in a bin.)
554     * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
555     * conflicts between resizing and treeification thresholds.
556 dl 1.38 */
557 dl 1.102 static final int MIN_TREEIFY_CAPACITY = 64;
558 dl 1.38
559 dl 1.82 /**
560     * Minimum number of rebinnings per transfer step. Ranges are
561     * subdivided to allow multiple resizer threads. This value
562     * serves as a lower bound to avoid resizers encountering
563     * excessive memory contention. The value should be at least
564     * DEFAULT_CAPACITY.
565     */
566     private static final int MIN_TRANSFER_STRIDE = 16;
567    
568 dl 1.24 /*
569 dl 1.82 * Encodings for Node hash fields. See above for explanation.
570 dl 1.1 */
571 dl 1.102 static final int MOVED = 0x8fffffff; // (-1) hash for forwarding nodes
572     static final int TREEBIN = 0x80000000; // hash for heads of trees
573     static final int RESERVED = 0x80000001; // hash for transient reservations
574 dl 1.82 static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
575    
576     /** Number of CPUS, to place bounds on some sizings */
577     static final int NCPU = Runtime.getRuntime().availableProcessors();
578    
579 dl 1.102 /** For serialization compatibility. */
580     private static final ObjectStreamField[] serialPersistentFields = {
581     new ObjectStreamField("segments", Segment[].class),
582     new ObjectStreamField("segmentMask", Integer.TYPE),
583     new ObjectStreamField("segmentShift", Integer.TYPE)
584     };
585    
586     /* ---------------- Nodes -------------- */
587    
588     /**
589     * Key-value entry. This class is never exported out as a
590     * user-mutable Map.Entry (i.e., one supporting setValue; see
591     * MapEntry below), but can be used for read-only traversals used
592     * in bulk tasks. Subclasses of Node with a negative hash field
593     * are special, and contain null keys and values (but are never
594     * exported). Otherwise, keys and vals are never null.
595     */
596     static class Node<K,V> implements Map.Entry<K,V> {
597     final int hash;
598     final K key;
599     volatile V val;
600     Node<K,V> next;
601    
602     Node(int hash, K key, V val, Node<K,V> next) {
603     this.hash = hash;
604     this.key = key;
605     this.val = val;
606     this.next = next;
607     }
608    
609     public final K getKey() { return key; }
610     public final V getValue() { return val; }
611     public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
612     public final String toString(){ return key + "=" + val; }
613     public final V setValue(V value) {
614     throw new UnsupportedOperationException();
615     }
616 dl 1.82
617 dl 1.102 public final boolean equals(Object o) {
618     Object k, v, u; Map.Entry<?,?> e;
619     return ((o instanceof Map.Entry) &&
620     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
621     (v = e.getValue()) != null &&
622     (k == key || k.equals(key)) &&
623     (v == (u = val) || v.equals(u)));
624     }
625 dl 1.82
626 dl 1.102 /**
627     * Virtualized support for map.get(); overridden in subclasses.
628     */
629     Node<K,V> find(int h, Object k) {
630     Node<K,V> e = this;
631     if (k != null) {
632     do {
633     K ek;
634     if (e.hash == h &&
635     ((ek = e.key) == k || (ek != null && k.equals(ek))))
636     return e;
637     } while ((e = e.next) != null);
638     }
639     return null;
640     }
641 dl 1.82 }
642    
643 dl 1.102 /* ---------------- Static utilities -------------- */
644    
645 dl 1.82 /**
646 dl 1.102 * Spreads (XORs) higher bits of hash to lower and also forces top
647     * bit to 0. Because the table uses power-of-two masking, sets of
648     * hashes that vary only in bits above the current mask will
649     * always collide. (Among known examples are sets of Float keys
650     * holding consecutive whole numbers in small tables.) So we
651     * apply a transform that spreads the impact of higher bits
652     * downward. There is a tradeoff between speed, utility, and
653     * quality of bit-spreading. Because many common sets of hashes
654     * are already reasonably distributed (so don't benefit from
655     * spreading), and because we use trees to handle large sets of
656     * collisions in bins, we just XOR some shifted bits in the
657     * cheapest possible way to reduce systematic lossage, as well as
658     * to incorporate impact of the highest bits that would otherwise
659     * never be used in index calculations because of table bounds.
660 dl 1.82 */
661 dl 1.102 static final int spread(int h) {
662     return (h ^ (h >>> 16)) & HASH_BITS;
663 dl 1.82 }
664    
665     /**
666 dl 1.102 * Returns a power of two table size for the given desired capacity.
667     * See Hacker's Delight, sec 3.2
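     * For example, {@code tableSizeFor(17)} and {@code tableSizeFor(32)}
     * both return 32, while {@code tableSizeFor(33)} returns 64.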
668 dl 1.82 */
669 dl 1.102 private static final int tableSizeFor(int c) {
670     int n = c - 1;
671     n |= n >>> 1;
672     n |= n >>> 2;
673     n |= n >>> 4;
674     n |= n >>> 8;
675     n |= n >>> 16;
676     return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
677     }
678 dl 1.82
679     /**
680 dl 1.102 * Returns x's Class if it is of the form "class C implements
681     * Comparable<C>", else null.
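     * For example, {@code comparableClassFor("abc")} returns
     * {@code String.class}, while {@code comparableClassFor(new Object())}
     * returns null.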
682 dl 1.82 */
683 dl 1.102 static Class<?> comparableClassFor(Object x) {
684     if (x instanceof Comparable) {
685     Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
686     if ((c = x.getClass()) == String.class) // bypass checks
687     return c;
688     if ((ts = c.getGenericInterfaces()) != null) {
689     for (int i = 0; i < ts.length; ++i) {
690     if (((t = ts[i]) instanceof ParameterizedType) &&
691     ((p = (ParameterizedType)t).getRawType() ==
692     Comparable.class) &&
693     (as = p.getActualTypeArguments()) != null &&
694     as.length == 1 && as[0] == c) // type arg is c
695     return c;
696     }
697     }
698     }
699     return null;
700     }
701 dl 1.82
702     /**
703 dl 1.102 * Returns k.compareTo(x) if x matches kc (k's screened comparable
704     * class), else 0.
705 dl 1.82 */
706 dl 1.102 @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
707     static int compareComparables(Class<?> kc, Object k, Object x) {
708     return (x == null || x.getClass() != kc ? 0 :
709     ((Comparable)k).compareTo(x));
710     }
711    
712     /* ---------------- Table element access -------------- */
713    
714     /*
715     * Volatile access methods are used for table elements as well as
716     * elements of in-progress next table while resizing. All uses of
717     * the tab arguments must be null checked by callers. All callers
718     * also paranoically precheck that tab's length is not zero (or an
719     * equivalent check), thus ensuring that any index argument taking
720     * the form of a hash value anded with (length - 1) is a valid
721     * index. Note that, to be correct wrt arbitrary concurrency
722     * errors by users, these checks must operate on local variables,
723     * which accounts for some odd-looking inline assignments below.
724     * Note that calls to setTabAt always occur within locked regions,
725     * and so do not need full volatile semantics, but still require
726     * ordering to maintain concurrent readability.
727     */
728    
729     @SuppressWarnings("unchecked")
730     static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
731     return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
732     }
733 dl 1.24
734 dl 1.102 static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
735     Node<K,V> c, Node<K,V> v) {
736     return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
737     }
738    
739     static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
740     U.putOrderedObject(tab, ((long)i << ASHIFT) + ABASE, v);
741     }
742    
743 dl 1.24 /* ---------------- Fields -------------- */
744    
745     /**
746     * The array of bins. Lazily initialized upon first insertion.
747     * Size is always a power of two. Accessed directly by iterators.
748     */
749 dl 1.102 transient volatile Node<K,V>[] table;
750 dl 1.14
751 dl 1.16 /**
752 dl 1.82 * The next table to use; non-null only while resizing.
753     */
754 dl 1.102 private transient volatile Node<K,V>[] nextTable;
755 dl 1.82
756     /**
757     * Base counter value, used mainly when there is no contention,
758     * but also as a fallback during table initialization
759     * races. Updated via CAS.
760 dl 1.16 */
761 dl 1.82 private transient volatile long baseCount;
762 dl 1.24
763     /**
764     * Table initialization and resizing control. When negative, the
765 dl 1.82 * table is being initialized or resized: -1 for initialization,
766     * else -(1 + the number of active resizing threads). Otherwise,
767     * when table is null, holds the initial table size to use upon
768     * creation, or 0 for default. After initialization, holds the
769     * next element count value upon which to resize the table.
770 dl 1.24 */
771     private transient volatile int sizeCtl;
772    
773 dl 1.82 /**
774     * The next table index (plus one) to split while resizing.
775     */
776     private transient volatile int transferIndex;
777    
778     /**
779     * The least available table index to split while resizing.
780     */
781     private transient volatile int transferOrigin;
782    
783     /**
784 dl 1.102 * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
785 dl 1.82 */
786 dl 1.102 private transient volatile int cellsBusy;
787 dl 1.82
788     /**
789     * Table of counter cells. When non-null, size is a power of 2.
790     */
791     private transient volatile CounterCell[] counterCells;
792    
793 dl 1.24 // views
794 dl 1.70 private transient KeySetView<K,V> keySet;
795 dl 1.75 private transient ValuesView<K,V> values;
796     private transient EntrySetView<K,V> entrySet;
797 dl 1.24
798 dl 1.16
799 dl 1.102 /* ---------------- Public operations -------------- */
800 dl 1.38
801 dl 1.102 /**
802     * Creates a new, empty map with the default initial table size (16).
803     */
804     public ConcurrentHashMapV8() {
805 dl 1.38 }
806    
807 dl 1.102 /**
808     * Creates a new, empty map with an initial table size
809     * accommodating the specified number of elements without the need
810     * to dynamically resize.
811     *
812     * @param initialCapacity The implementation performs internal
813     * sizing to accommodate this many elements.
814     * @throws IllegalArgumentException if the initial capacity of
815     * elements is negative
816     */
817     public ConcurrentHashMapV8(int initialCapacity) {
818     if (initialCapacity < 0)
819     throw new IllegalArgumentException();
820     int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
821     MAXIMUM_CAPACITY :
822     tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
823     this.sizeCtl = cap;
824 dl 1.38 }
825    
826 dl 1.102 /**
827     * Creates a new map with the same mappings as the given map.
828     *
829     * @param m the map
830     */
831     public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
832     this.sizeCtl = DEFAULT_CAPACITY;
833     putAll(m);
834 dl 1.38 }
835    
836 dl 1.102 /**
837     * Creates a new, empty map with an initial table size based on
838     * the given number of elements ({@code initialCapacity}) and
839     * initial table density ({@code loadFactor}).
840     *
841     * @param initialCapacity the initial capacity. The implementation
842     * performs internal sizing to accommodate this many elements,
843     * given the specified load factor.
844     * @param loadFactor the load factor (table density) for
845     * establishing the initial table size
846     * @throws IllegalArgumentException if the initial capacity of
847     * elements is negative or the load factor is nonpositive
848     *
849     * @since 1.6
850     */
851     public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
852     this(initialCapacity, loadFactor, 1);
853     }
854 dl 1.1
855     /**
856 dl 1.102 * Creates a new, empty map with an initial table size based on
857     * the given number of elements ({@code initialCapacity}), table
858     * density ({@code loadFactor}), and number of concurrently
859     * updating threads ({@code concurrencyLevel}).
860     *
861     * @param initialCapacity the initial capacity. The implementation
862     * performs internal sizing to accommodate this many elements,
863     * given the specified load factor.
864     * @param loadFactor the load factor (table density) for
865     * establishing the initial table size
866     * @param concurrencyLevel the estimated number of concurrently
867     * updating threads. The implementation may use this value as
868     * a sizing hint.
869     * @throws IllegalArgumentException if the initial capacity is
870     * negative or the load factor or concurrencyLevel are
871     * nonpositive
872 dl 1.1 */
873 dl 1.102 public ConcurrentHashMapV8(int initialCapacity,
874     float loadFactor, int concurrencyLevel) {
875     if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
876     throw new IllegalArgumentException();
877     if (initialCapacity < concurrencyLevel) // Use at least as many bins
878     initialCapacity = concurrencyLevel; // as estimated threads
879     long size = (long)(1.0 + (long)initialCapacity / loadFactor);
880     int cap = (size >= (long)MAXIMUM_CAPACITY) ?
881     MAXIMUM_CAPACITY : tableSizeFor((int)size);
882     this.sizeCtl = cap;
883 dl 1.24 }
884 dl 1.1
885 dl 1.102 // Original (since JDK1.2) Map methods
886 dl 1.38
887     /**
888 dl 1.102 * {@inheritDoc}
889 dl 1.38 */
890 dl 1.102 public int size() {
891     long n = sumCount();
892     return ((n < 0L) ? 0 :
893     (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
894     (int)n);
895 dl 1.38 }
896 dl 1.1
897 dl 1.38 /**
898 dl 1.102 * {@inheritDoc}
899     */
900     public boolean isEmpty() {
901     return sumCount() <= 0L; // ignore transient negative values
902     }
903    
904     /**
905     * Returns the value to which the specified key is mapped,
906     * or {@code null} if this map contains no mapping for the key.
907 dl 1.38 *
908 dl 1.102 * <p>More formally, if this map contains a mapping from a key
909     * {@code k} to a value {@code v} such that {@code key.equals(k)},
910     * then this method returns {@code v}; otherwise it returns
911     * {@code null}. (There can be at most one such mapping.)
912 dl 1.38 *
913 dl 1.102 * @throws NullPointerException if the specified key is null
914 dl 1.1 */
915 dl 1.102 public V get(Object key) {
916     Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
917     int h = spread(key.hashCode());
918     if ((tab = table) != null && (n = tab.length) > 0 &&
919     (e = tabAt(tab, (n - 1) & h)) != null) {
920     if ((eh = e.hash) == h) {
921     if ((ek = e.key) == key || (ek != null && key.equals(ek)))
922     return e.val;
923     }
924     else if (eh < 0)
925     return (p = e.find(h, key)) != null ? p.val : null;
926     while ((e = e.next) != null) {
927     if (e.hash == h &&
928     ((ek = e.key) == key || (ek != null && key.equals(ek))))
929     return e.val;
930 dl 1.41 }
931     }
932 dl 1.102 return null;
933     }
934 dl 1.41
935 dl 1.102 /**
936     * Tests if the specified object is a key in this table.
937     *
938     * @param key possible key
939     * @return {@code true} if and only if the specified object
940     * is a key in this table, as determined by the
941     * {@code equals} method; {@code false} otherwise
942     * @throws NullPointerException if the specified key is null
943     */
944     public boolean containsKey(Object key) {
945     return get(key) != null;
946     }
947 dl 1.41
948 dl 1.102 /**
949     * Returns {@code true} if this map maps one or more keys to the
950     * specified value. Note: This method may require a full traversal
951     * of the map, and is much slower than method {@code containsKey}.
952     *
953     * @param value value whose presence in this map is to be tested
954     * @return {@code true} if this map maps one or more keys to the
955     * specified value
956     * @throws NullPointerException if the specified value is null
957     */
958     public boolean containsValue(Object value) {
959     if (value == null)
960     throw new NullPointerException();
961     Node<K,V>[] t;
962     if ((t = table) != null) {
963     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
964     for (Node<K,V> p; (p = it.advance()) != null; ) {
965     V v;
966     if ((v = p.val) == value || (v != null && value.equals(v)))
967     return true;
968 dl 1.38 }
969     }
970 dl 1.102 return false;
971     }
972 dl 1.38
973 dl 1.102 /**
974     * Maps the specified key to the specified value in this table.
975     * Neither the key nor the value can be null.
976     *
977     * <p>The value can be retrieved by calling the {@code get} method
978     * with a key that is equal to the original key.
979     *
980     * @param key key with which the specified value is to be associated
981     * @param value value to be associated with the specified key
982     * @return the previous value associated with {@code key}, or
983     * {@code null} if there was no mapping for {@code key}
984     * @throws NullPointerException if the specified key or value is null
985     */
986     public V put(K key, V value) {
987     return putVal(key, value, false);
988     }
989 dl 1.38
990 dl 1.102 /** Implementation for put and putIfAbsent */
991     final V putVal(K key, V value, boolean onlyIfAbsent) {
992     if (key == null || value == null) throw new NullPointerException();
993     int hash = spread(key.hashCode());
994     int binCount = 0;
995     for (Node<K,V>[] tab = table;;) {
996     Node<K,V> f; int n, i, fh;
997     if (tab == null || (n = tab.length) == 0)
998     tab = initTable();
999     else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
1000     if (casTabAt(tab, i, null,
1001     new Node<K,V>(hash, key, value, null)))
1002     break; // no lock when adding to empty bin
1003 dl 1.38 }
1004 dl 1.102 else if ((fh = f.hash) == MOVED)
1005     tab = helpTransfer(tab, f);
1006 dl 1.38 else {
1007 dl 1.102 V oldVal = null;
1008     synchronized (f) {
1009     if (tabAt(tab, i) == f) {
1010     if (fh >= 0) {
1011     binCount = 1;
1012     for (Node<K,V> e = f;; ++binCount) {
1013     K ek;
1014     if (e.hash == hash &&
1015     ((ek = e.key) == key ||
1016     (ek != null && key.equals(ek)))) {
1017     oldVal = e.val;
1018     if (!onlyIfAbsent)
1019     e.val = value;
1020     break;
1021 dl 1.41 }
1022 dl 1.102 Node<K,V> pred = e;
1023     if ((e = e.next) == null) {
1024     pred.next = new Node<K,V>(hash, key,
1025     value, null);
1026     break;
1027 dl 1.41 }
1028 dl 1.38 }
1029     }
1030 dl 1.102 else if (f instanceof TreeBin) {
1031     Node<K,V> p;
1032     binCount = 2;
1033     if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
1034     value)) != null) {
1035     oldVal = p.val;
1036     if (!onlyIfAbsent)
1037     p.val = value;
1038 dl 1.38 }
1039     }
1040     }
1041     }
1042 dl 1.102 if (binCount != 0) {
1043     if (binCount >= TREEIFY_THRESHOLD)
1044     treeifyBin(tab, i);
1045     if (oldVal != null)
1046     return oldVal;
1047     break;
1048     }
1049 dl 1.41 }
1050 dl 1.38 }
1051 dl 1.102 addCount(1L, binCount);
1052     return null;
1053 dl 1.1 }
1054    
1055 dl 1.14 /**
1056 dl 1.102 * Copies all of the mappings from the specified map to this one.
1057     * These mappings replace any mappings that this map had for any of the
1058     * keys currently in the specified map.
1059     *
1060     * @param m mappings to be stored in this map
1061 dl 1.14 */
1062 dl 1.102 public void putAll(Map<? extends K, ? extends V> m) {
1063     tryPresize(m.size());
1064     for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1065     putVal(e.getKey(), e.getValue(), false);
1066 dl 1.38 }
1067    
1068     /**
1069 dl 1.102 * Removes the key (and its corresponding value) from this map.
1070     * This method does nothing if the key is not in the map.
1071     *
1072     * @param key the key that needs to be removed
1073     * @return the previous value associated with {@code key}, or
1074     * {@code null} if there was no mapping for {@code key}
1075     * @throws NullPointerException if the specified key is null
1076 dl 1.38 */
1077 dl 1.102 public V remove(Object key) {
1078     return replaceNode(key, null, null);
1079 dl 1.1 }
1080    
1081 dl 1.27 /**
1082     * Implementation for the four public remove/replace methods:
1083     * Replaces node value with v, conditional upon match of cv if
1084     * non-null. If resulting value is null, delete.
1085     */
1086 dl 1.102 final V replaceNode(Object key, V value, Object cv) {
1087     int hash = spread(key.hashCode());
1088     for (Node<K,V>[] tab = table;;) {
1089     Node<K,V> f; int n, i, fh;
1090     if (tab == null || (n = tab.length) == 0 ||
1091     (f = tabAt(tab, i = (n - 1) & hash)) == null)
1092 dl 1.27 break;
1093 dl 1.102 else if ((fh = f.hash) == MOVED)
1094     tab = helpTransfer(tab, f);
1095     else {
1096     V oldVal = null;
1097     boolean validated = false;
1098     synchronized (f) {
1099     if (tabAt(tab, i) == f) {
1100     if (fh >= 0) {
1101     validated = true;
1102     for (Node<K,V> e = f, pred = null;;) {
1103     K ek;
1104     if (e.hash == hash &&
1105     ((ek = e.key) == key ||
1106     (ek != null && key.equals(ek)))) {
1107     V ev = e.val;
1108     if (cv == null || cv == ev ||
1109     (ev != null && cv.equals(ev))) {
1110     oldVal = ev;
1111     if (value != null)
1112     e.val = value;
1113     else if (pred != null)
1114     pred.next = e.next;
1115     else
1116     setTabAt(tab, i, e.next);
1117     }
1118     break;
1119     }
1120     pred = e;
1121     if ((e = e.next) == null)
1122     break;
1123     }
1124     }
1125     else if (f instanceof TreeBin) {
1126 dl 1.38 validated = true;
1127 dl 1.102 TreeBin<K,V> t = (TreeBin<K,V>)f;
1128     TreeNode<K,V> r, p;
1129     if ((r = t.root) != null &&
1130     (p = r.findTreeNode(hash, key, null)) != null) {
1131 dl 1.84 V pv = p.val;
1132 dl 1.102 if (cv == null || cv == pv ||
1133     (pv != null && cv.equals(pv))) {
1134 dl 1.38 oldVal = pv;
1135 dl 1.102 if (value != null)
1136     p.val = value;
1137     else if (t.removeTreeNode(p))
1138     setTabAt(tab, i, untreeify(t.first));
1139 dl 1.38 }
1140     }
1141     }
1142     }
1143 dl 1.102 }
1144     if (validated) {
1145     if (oldVal != null) {
1146     if (value == null)
1147 dl 1.82 addCount(-1L, -1);
1148 dl 1.102 return oldVal;
1149 dl 1.38 }
1150 dl 1.102 break;
1151 dl 1.38 }
1152     }
1153 dl 1.102 }
1154     return null;
1155     }
1156    
1157     /**
1158     * Removes all of the mappings from this map.
1159     */
1160     public void clear() {
1161     long delta = 0L; // negative number of deletions
1162     int i = 0;
1163     Node<K,V>[] tab = table;
1164     while (tab != null && i < tab.length) {
1165     int fh;
1166     Node<K,V> f = tabAt(tab, i);
1167     if (f == null)
1168     ++i;
1169     else if ((fh = f.hash) == MOVED) {
1170     tab = helpTransfer(tab, f);
1171     i = 0; // restart
1172     }
1173 dl 1.82 else {
1174 jsr166 1.83 synchronized (f) {
1175 dl 1.27 if (tabAt(tab, i) == f) {
1176 dl 1.102 Node<K,V> p = (fh >= 0 ? f :
1177     (f instanceof TreeBin) ?
1178     ((TreeBin<K,V>)f).first : null);
1179     while (p != null) {
1180     --delta;
1181     p = p.next;
1182 dl 1.27 }
1183 dl 1.102 setTabAt(tab, i++, null);
1184 dl 1.27 }
1185     }
1186     }
1187     }
1188 dl 1.102 if (delta != 0L)
1189     addCount(delta, -1);
1190     }
1191    
1192     /**
1193     * Returns a {@link Set} view of the keys contained in this map.
1194     * The set is backed by the map, so changes to the map are
1195     * reflected in the set, and vice-versa. The set supports element
1196     * removal, which removes the corresponding mapping from this map,
1197     * via the {@code Iterator.remove}, {@code Set.remove},
1198     * {@code removeAll}, {@code retainAll}, and {@code clear}
1199     * operations. It does not support the {@code add} or
1200     * {@code addAll} operations.
1201     *
1202     * <p>The view's {@code iterator} is a "weakly consistent" iterator
1203     * that will never throw {@link ConcurrentModificationException},
1204     * and guarantees to traverse elements as they existed upon
1205     * construction of the iterator, and may (but is not guaranteed to)
1206     * reflect any modifications subsequent to construction.
1207     *
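     * <p>For example (an illustrative sketch, given a {@code
     * ConcurrentHashMapV8<String,Long> map}), iteration may proceed while
     * other threads update the map:
     *
     * <pre> {@code
     * for (String k : map.keySet())
     *     System.out.println(k); // keys added concurrently may or may not appear
     * }</pre>
     *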
1208     * @return the set view
1209     */
1210     public KeySetView<K,V> keySet() {
1211     KeySetView<K,V> ks;
1212     return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
1213     }
1214    
1215     /**
1216     * Returns a {@link Collection} view of the values contained in this map.
1217     * The collection is backed by the map, so changes to the map are
1218     * reflected in the collection, and vice-versa. The collection
1219     * supports element removal, which removes the corresponding
1220     * mapping from this map, via the {@code Iterator.remove},
1221     * {@code Collection.remove}, {@code removeAll},
1222     * {@code retainAll}, and {@code clear} operations. It does not
1223     * support the {@code add} or {@code addAll} operations.
1224     *
1225     * <p>The view's {@code iterator} is a "weakly consistent" iterator
1226     * that will never throw {@link ConcurrentModificationException},
1227     * and guarantees to traverse elements as they existed upon
1228     * construction of the iterator, and may (but is not guaranteed to)
1229     * reflect any modifications subsequent to construction.
1230     *
1231     * @return the collection view
1232     */
1233     public Collection<V> values() {
1234     ValuesView<K,V> vs;
1235     return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
1236     }
1237    
1238     /**
1239     * Returns a {@link Set} view of the mappings contained in this map.
1240     * The set is backed by the map, so changes to the map are
1241     * reflected in the set, and vice-versa. The set supports element
1242     * removal, which removes the corresponding mapping from the map,
1243     * via the {@code Iterator.remove}, {@code Set.remove},
1244     * {@code removeAll}, {@code retainAll}, and {@code clear}
1245     * operations.
1246     *
1247     * <p>The view's {@code iterator} is a "weakly consistent" iterator
1248     * that will never throw {@link ConcurrentModificationException},
1249     * and guarantees to traverse elements as they existed upon
1250     * construction of the iterator, and may (but is not guaranteed to)
1251     * reflect any modifications subsequent to construction.
1252     *
1253     * @return the set view
1254     */
1255     public Set<Map.Entry<K,V>> entrySet() {
1256     EntrySetView<K,V> es;
1257     return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
1258 dl 1.27 }
1259    
1260 dl 1.102 /**
1261     * Returns the hash code value for this {@link Map}, i.e.,
1262     * the sum of, for each key-value pair in the map,
1263     * {@code key.hashCode() ^ value.hashCode()}.
1264     *
1265     * @return the hash code value for this map
1266 dl 1.27 */
1267 dl 1.102 public int hashCode() {
1268     int h = 0;
1269     Node<K,V>[] t;
1270     if ((t = table) != null) {
1271     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1272     for (Node<K,V> p; (p = it.advance()) != null; )
1273     h += p.key.hashCode() ^ p.val.hashCode();
1274     }
1275     return h;
1276     }
1277 dl 1.27
1278 dl 1.102 /**
1279     * Returns a string representation of this map. The string
1280     * representation consists of a list of key-value mappings (in no
1281     * particular order) enclosed in braces ("{@code {}}"). Adjacent
1282     * mappings are separated by the characters {@code ", "} (comma
1283     * and space). Each key-value mapping is rendered as the key
1284     * followed by an equals sign ("{@code =}") followed by the
1285     * associated value.
1286     *
1287     * @return a string representation of this map
1288     */
1289     public String toString() {
1290     Node<K,V>[] t;
1291     int f = (t = table) == null ? 0 : t.length;
1292     Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1293     StringBuilder sb = new StringBuilder();
1294     sb.append('{');
1295     Node<K,V> p;
1296     if ((p = it.advance()) != null) {
1297     for (;;) {
1298     K k = p.key;
1299     V v = p.val;
1300     sb.append(k == this ? "(this Map)" : k);
1301     sb.append('=');
1302     sb.append(v == this ? "(this Map)" : v);
1303     if ((p = it.advance()) == null)
1304     break;
1305     sb.append(',').append(' ');
1306 dl 1.14 }
1307 dl 1.102 }
1308     return sb.append('}').toString();
1309     }
1310    
1311     /**
1312     * Compares the specified object with this map for equality.
1313     * Returns {@code true} if the given object is a map with the same
1314     * mappings as this map. This operation may return misleading
1315     * results if either map is concurrently modified during execution
1316     * of this method.
1317     *
1318     * @param o object to be compared for equality with this map
1319     * @return {@code true} if the specified object is equal to this map
1320     */
1321     public boolean equals(Object o) {
1322     if (o != this) {
1323     if (!(o instanceof Map))
1324     return false;
1325     Map<?,?> m = (Map<?,?>) o;
1326     Node<K,V>[] t;
1327     int f = (t = table) == null ? 0 : t.length;
1328     Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1329     for (Node<K,V> p; (p = it.advance()) != null; ) {
1330     V val = p.val;
1331     Object v = m.get(p.key);
1332     if (v == null || (v != val && !v.equals(val)))
1333     return false;
1334 dl 1.38 }
1335 dl 1.102 for (Map.Entry<?,?> e : m.entrySet()) {
1336     Object mk, mv, v;
1337     if ((mk = e.getKey()) == null ||
1338     (mv = e.getValue()) == null ||
1339     (v = get(mk)) == null ||
1340     (mv != v && !mv.equals(v)))
1341     return false;
1342 dl 1.1 }
1343     }
1344 dl 1.102 return true;
1345     }
1346    
1347     /**
1348     * Stripped-down version of helper class used in previous version,
1349     * declared for the sake of serialization compatibility
1350     */
1351     static class Segment<K,V> extends ReentrantLock implements Serializable {
1352     private static final long serialVersionUID = 2249069246763182397L;
1353     final float loadFactor;
1354     Segment(float lf) { this.loadFactor = lf; }
1355 dl 1.27 }
1356    
1357 dl 1.102 /**
1358     * Saves the state of the {@code ConcurrentHashMapV8} instance to a
1359     * stream (i.e., serializes it).
1360     * @param s the stream
1361     * @serialData
1362     * the key (Object) and value (Object)
1363     * for each key-value mapping, followed by a null pair.
1364     * The key-value mappings are emitted in no particular order.
1365     */
1366     private void writeObject(java.io.ObjectOutputStream s)
1367     throws java.io.IOException {
1368     // For serialization compatibility
1369     // Emulate segment calculation from previous version of this class
1370     int sshift = 0;
1371     int ssize = 1;
1372     while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
1373     ++sshift;
1374     ssize <<= 1;
1375     }
1376     int segmentShift = 32 - sshift;
1377     int segmentMask = ssize - 1;
1378     @SuppressWarnings("unchecked") Segment<K,V>[] segments = (Segment<K,V>[])
1379     new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
1380     for (int i = 0; i < segments.length; ++i)
1381     segments[i] = new Segment<K,V>(LOAD_FACTOR);
1382     s.putFields().put("segments", segments);
1383     s.putFields().put("segmentShift", segmentShift);
1384     s.putFields().put("segmentMask", segmentMask);
1385     s.writeFields();
1386    
1387     Node<K,V>[] t;
1388     if ((t = table) != null) {
1389     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1390     for (Node<K,V> p; (p = it.advance()) != null; ) {
1391     s.writeObject(p.key);
1392     s.writeObject(p.val);
1393 dl 1.27 }
1394 dl 1.102 }
1395     s.writeObject(null);
1396     s.writeObject(null);
1397     segments = null; // throw away
1398     }
1399    
1400     /**
1401     * Reconstitutes the instance from a stream (that is, deserializes it).
1402     * @param s the stream
1403     */
1404     private void readObject(java.io.ObjectInputStream s)
1405     throws java.io.IOException, ClassNotFoundException {
1406     /*
1407     * To improve performance in typical cases, we create nodes
1408     * while reading, then place in table once size is known.
1409     * However, we must also validate uniqueness and deal with
1410     * overpopulated bins while doing so, which requires
1411     * specialized versions of putVal mechanics.
1412     */
1413     sizeCtl = -1; // force exclusion for table construction
1414     s.defaultReadObject();
1415     long size = 0L;
1416     Node<K,V> p = null;
1417     for (;;) {
1418     @SuppressWarnings("unchecked") K k = (K) s.readObject();
1419     @SuppressWarnings("unchecked") V v = (V) s.readObject();
1420     if (k != null && v != null) {
1421     p = new Node<K,V>(spread(k.hashCode()), k, v, p);
1422     ++size;
1423 dl 1.38 }
1424 dl 1.102 else
1425     break;
1426     }
1427     if (size == 0L)
1428     sizeCtl = 0;
1429     else {
1430     int n;
1431     if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
1432     n = MAXIMUM_CAPACITY;
1433 dl 1.27 else {
1434 dl 1.102 int sz = (int)size;
1435     n = tableSizeFor(sz + (sz >>> 1) + 1);
1436     }
1437     @SuppressWarnings({"rawtypes","unchecked"})
1438     Node<K,V>[] tab = (Node<K,V>[])new Node[n];
1439     int mask = n - 1;
1440     long added = 0L;
1441     while (p != null) {
1442     boolean insertAtFront;
1443     Node<K,V> next = p.next, first;
1444     int h = p.hash, j = h & mask;
1445     if ((first = tabAt(tab, j)) == null)
1446     insertAtFront = true;
1447     else {
1448     K k = p.key;
1449     if (first.hash < 0) {
1450     TreeBin<K,V> t = (TreeBin<K,V>)first;
1451     if (t.putTreeVal(h, k, p.val) == null)
1452     ++added;
1453     insertAtFront = false;
1454     }
1455     else {
1456     int binCount = 0;
1457     insertAtFront = true;
1458     Node<K,V> q; K qk;
1459     for (q = first; q != null; q = q.next) {
1460     if (q.hash == h &&
1461     ((qk = q.key) == k ||
1462     (qk != null && k.equals(qk)))) {
1463     insertAtFront = false;
1464 dl 1.82 break;
1465     }
1466 dl 1.102 ++binCount;
1467     }
1468     if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
1469     insertAtFront = false;
1470     ++added;
1471     p.next = first;
1472     TreeNode<K,V> hd = null, tl = null;
1473     for (q = p; q != null; q = q.next) {
1474     TreeNode<K,V> t = new TreeNode<K,V>
1475     (q.hash, q.key, q.val, null, null);
1476     if ((t.prev = tl) == null)
1477     hd = t;
1478     else
1479     tl.next = t;
1480     tl = t;
1481 dl 1.27 }
1482 dl 1.102 setTabAt(tab, j, new TreeBin<K,V>(hd));
1483 dl 1.27 }
1484 dl 1.38 }
1485 dl 1.82 }
1486 dl 1.102 if (insertAtFront) {
1487     ++added;
1488     p.next = first;
1489     setTabAt(tab, j, p);
1490     }
1491     p = next;
1492     }
1493     table = tab;
1494     sizeCtl = n - (n >>> 2);
1495     baseCount = added;
1496     }
1497     }
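    /*
     * Round-trip sketch (hypothetical names; the java.io stream classes are
     * assumed imported and exceptions handled by the caller). The private
     * writeObject/readObject hooks above are driven by ordinary Java
     * serialization, so a populated map can be copied like any other
     * Serializable object:
     *
     *   ConcurrentHashMapV8<String,Integer> map = new ConcurrentHashMapV8<String,Integer>();
     *   map.put("a", 1);
     *   ByteArrayOutputStream bos = new ByteArrayOutputStream();
     *   new ObjectOutputStream(bos).writeObject(map);
     *   ObjectInputStream in = new ObjectInputStream(
     *       new ByteArrayInputStream(bos.toByteArray()));
     *   @SuppressWarnings("unchecked")
     *   ConcurrentHashMapV8<String,Integer> copy =
     *       (ConcurrentHashMapV8<String,Integer>) in.readObject();
     */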
1498    
1499     // ConcurrentMap methods
1500    
1501     /**
1502     * {@inheritDoc}
1503     *
1504     * @return the previous value associated with the specified key,
1505     * or {@code null} if there was no mapping for the key
1506     * @throws NullPointerException if the specified key or value is null
1507     */
1508     public V putIfAbsent(K key, V value) {
1509     return putVal(key, value, true);
1510     }
1511    
1512     /**
1513     * {@inheritDoc}
1514     *
1515     * @throws NullPointerException if the specified key is null
1516     */
1517     public boolean remove(Object key, Object value) {
1518     if (key == null)
1519     throw new NullPointerException();
1520     return value != null && replaceNode(key, null, value) != null;
1521     }
1522    
1523     /**
1524     * {@inheritDoc}
1525     *
1526     * @throws NullPointerException if any of the arguments are null
1527     */
1528     public boolean replace(K key, V oldValue, V newValue) {
1529     if (key == null || oldValue == null || newValue == null)
1530     throw new NullPointerException();
1531     return replaceNode(key, newValue, oldValue) != null;
1532     }
1533    
1534     /**
1535     * {@inheritDoc}
1536     *
1537     * @return the previous value associated with the specified key,
1538     * or {@code null} if there was no mapping for the key
1539     * @throws NullPointerException if the specified key or value is null
1540     */
1541     public V replace(K key, V value) {
1542     if (key == null || value == null)
1543     throw new NullPointerException();
1544     return replaceNode(key, value, null);
1545     }
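    /*
     * Usage sketch for the ConcurrentMap methods above (hypothetical map and
     * key names). putIfAbsent installs a value only when none is present, and
     * the three-argument replace acts as a per-key compare-and-set, so an
     * atomic increment can be written as a retry loop:
     *
     *   ConcurrentHashMapV8<String,Integer> scores =
     *       new ConcurrentHashMapV8<String,Integer>();
     *   for (;;) {
     *       Integer cur = scores.get("alice");
     *       if (cur == null) {
     *           if (scores.putIfAbsent("alice", 1) == null)
     *               break;                      // installed the first count
     *       }
     *       else if (scores.replace("alice", cur, cur + 1))
     *           break;                          // atomically bumped cur
     *   }
     */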
1546    
1547     // Overrides of JDK8+ Map extension method defaults
1548    
1549     /**
1550     * Returns the value to which the specified key is mapped, or the
1551     * given default value if this map contains no mapping for the
1552     * key.
1553     *
1554     * @param key the key whose associated value is to be returned
1555     * @param defaultValue the value to return if this map contains
1556     * no mapping for the given key
1557     * @return the mapping for the key, if present; else the default value
1558     * @throws NullPointerException if the specified key is null
1559     */
1560     public V getOrDefault(Object key, V defaultValue) {
1561     V v;
1562     return (v = get(key)) == null ? defaultValue : v;
1563     }
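    /*
     * Usage sketch (hypothetical names): given the scores map from the sketch
     * above, getOrDefault avoids the explicit null check a plain get needs:
     *
     *   int bob = scores.getOrDefault("bob", 0);   // 0 if "bob" is absent
     */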
1564    
1565     public void forEach(BiAction<? super K, ? super V> action) {
1566     if (action == null) throw new NullPointerException();
1567     Node<K,V>[] t;
1568     if ((t = table) != null) {
1569     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1570     for (Node<K,V> p; (p = it.advance()) != null; ) {
1571     action.apply(p.key, p.val);
1572     }
1573     }
1574     }
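    /*
     * Usage sketch for forEach (hypothetical names; BiAction is the callback
     * interface used in the signature above, assumed in scope). Like the
     * view iterators, the traversal is weakly consistent:
     *
     *   scores.forEach(new BiAction<String,Integer>() {
     *       public void apply(String k, Integer v) {
     *           System.out.println(k + " -> " + v);
     *       }
     *   });
     */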
1575    
1576     public void replaceAll(BiFun<? super K, ? super V, ? extends V> function) {
1577     if (function == null) throw new NullPointerException();
1578     Node<K,V>[] t;
1579     if ((t = table) != null) {
1580     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1581     for (Node<K,V> p; (p = it.advance()) != null; ) {
1582     V oldValue = p.val;
1583     for (K key = p.key;;) {
1584     V newValue = function.apply(key, oldValue);
1585     if (newValue == null)
1586     throw new NullPointerException();
1587     if (replaceNode(key, newValue, oldValue) != null ||
1588     (oldValue = get(key)) == null)
1589     break;
1590 dl 1.1 }
1591     }
1592     }
1593     }
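    /*
     * Usage sketch for replaceAll (hypothetical names; BiFun assumed in
     * scope): rewrite every value in place. As the loop above shows, each key
     * is retried until the new value applies to the value actually present
     * (or the key disappears).
     *
     *   scores.replaceAll(new BiFun<String,Integer,Integer>() {
     *       public Integer apply(String k, Integer v) {
     *           return v * 2;                   // double every score
     *       }
     *   });
     */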
1594    
1595 dl 1.102 /**
1596     * If the specified key is not already associated with a value,
1597     * attempts to compute its value using the given mapping function
1598     * and enters it into this map unless {@code null}. The entire
1599     * method invocation is performed atomically, so the function is
1600     * applied at most once per key. Some attempted update operations
1601     * on this map by other threads may be blocked while computation
1602     * is in progress, so the computation should be short and simple,
1603     * and must not attempt to update any other mappings of this map.
1604     *
1605     * @param key key with which the specified value is to be associated
1606     * @param mappingFunction the function to compute a value
1607     * @return the current (existing or computed) value associated with
1608     * the specified key, or null if the computed value is null
1609     * @throws NullPointerException if the specified key or mappingFunction
1610     * is null
1611     * @throws IllegalStateException if the computation detectably
1612     * attempts a recursive update to this map that would
1613     * otherwise never complete
1614     * @throws RuntimeException or Error if the mappingFunction does so,
1615     * in which case the mapping is left unestablished
1616     */
1617     public V computeIfAbsent(K key, Fun<? super K, ? extends V> mappingFunction) {
1618     if (key == null || mappingFunction == null)
1619 dl 1.82 throw new NullPointerException();
1620 dl 1.102 int h = spread(key.hashCode());
1621 dl 1.84 V val = null;
1622 dl 1.102 int binCount = 0;
1623     for (Node<K,V>[] tab = table;;) {
1624     Node<K,V> f; int n, i, fh;
1625     if (tab == null || (n = tab.length) == 0)
1626 dl 1.24 tab = initTable();
1627 dl 1.102 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1628     Node<K,V> r = new ReservationNode<K,V>();
1629     synchronized (r) {
1630     if (casTabAt(tab, i, null, r)) {
1631     binCount = 1;
1632     Node<K,V> node = null;
1633 dl 1.82 try {
1634 dl 1.102 if ((val = mappingFunction.apply(key)) != null)
1635     node = new Node<K,V>(h, key, val, null);
1636 dl 1.82 } finally {
1637 dl 1.102 setTabAt(tab, i, node);
1638 dl 1.1 }
1639     }
1640     }
1641 dl 1.102 if (binCount != 0)
1642 dl 1.10 break;
1643 dl 1.1 }
1644 dl 1.102 else if ((fh = f.hash) == MOVED)
1645     tab = helpTransfer(tab, f);
1646     else {
1647     boolean added = false;
1648     synchronized (f) {
1649     if (tabAt(tab, i) == f) {
1650     if (fh >= 0) {
1651     binCount = 1;
1652     for (Node<K,V> e = f;; ++binCount) {
1653     K ek; V ev;
1654     if (e.hash == h &&
1655     ((ek = e.key) == key ||
1656     (ek != null && key.equals(ek)))) {
1657     val = e.val;
1658     break;
1659     }
1660     Node<K,V> pred = e;
1661     if ((e = e.next) == null) {
1662     if ((val = mappingFunction.apply(key)) != null) {
1663     added = true;
1664     pred.next = new Node<K,V>(h, key, val, null);
1665     }
1666     break;
1667 dl 1.38 }
1668     }
1669 dl 1.102 }
1670     else if (f instanceof TreeBin) {
1671     binCount = 2;
1672     TreeBin<K,V> t = (TreeBin<K,V>)f;
1673     TreeNode<K,V> r, p;
1674     if ((r = t.root) != null &&
1675     (p = r.findTreeNode(h, key, null)) != null)
1676     val = p.val;
1677     else if ((val = mappingFunction.apply(key)) != null) {
1678     added = true;
1679     t.putTreeVal(h, key, val);
1680 dl 1.41 }
1681 dl 1.38 }
1682     }
1683     }
1684 dl 1.102 if (binCount != 0) {
1685     if (binCount >= TREEIFY_THRESHOLD)
1686     treeifyBin(tab, i);
1687     if (!added)
1688     return val;
1689     break;
1690     }
1691 dl 1.38 }
1692 dl 1.102 }
1693     if (val != null)
1694     addCount(1L, binCount);
1695     return val;
1696     }
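    /*
     * Usage sketch (hypothetical names; Fun is the interface used in the
     * signature above, assumed in scope, and java.util.List plus
     * java.util.concurrent.CopyOnWriteArrayList are assumed imported):
     * lazily create a thread-safe per-key collection exactly once, then use
     * the returned (existing or newly computed) value.
     *
     *   ConcurrentHashMapV8<String,List<String>> index =
     *       new ConcurrentHashMapV8<String,List<String>>();
     *   List<String> bucket = index.computeIfAbsent("k",
     *       new Fun<String,List<String>>() {
     *           public List<String> apply(String key) {
     *               return new CopyOnWriteArrayList<String>();
     *           }
     *       });
     *   bucket.add("v");        // the list was created at most once for "k"
     */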
1697    
1698     /**
1699     * If the value for the specified key is present, attempts to
1700     * compute a new mapping given the key and its current mapped
1701     * value. The entire method invocation is performed atomically.
1702     * Some attempted update operations on this map by other threads
1703     * may be blocked while computation is in progress, so the
1704     * computation should be short and simple, and must not attempt to
1705     * update any other mappings of this map.
1706     *
1707     * @param key key with which a value may be associated
1708     * @param remappingFunction the function to compute a value
1709     * @return the new value associated with the specified key, or null if none
1710     * @throws NullPointerException if the specified key or remappingFunction
1711     * is null
1712     * @throws IllegalStateException if the computation detectably
1713     * attempts a recursive update to this map that would
1714     * otherwise never complete
1715     * @throws RuntimeException or Error if the remappingFunction does so,
1716     * in which case the mapping is unchanged
1717     */
1718     public V computeIfPresent(K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
1719     if (key == null || remappingFunction == null)
1720     throw new NullPointerException();
1721     int h = spread(key.hashCode());
1722     V val = null;
1723     int delta = 0;
1724     int binCount = 0;
1725     for (Node<K,V>[] tab = table;;) {
1726     Node<K,V> f; int n, i, fh;
1727     if (tab == null || (n = tab.length) == 0)
1728     tab = initTable();
1729     else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
1730     break;
1731     else if ((fh = f.hash) == MOVED)
1732     tab = helpTransfer(tab, f);
1733 dl 1.82 else {
1734 jsr166 1.83 synchronized (f) {
1735 dl 1.24 if (tabAt(tab, i) == f) {
1736 dl 1.102 if (fh >= 0) {
1737     binCount = 1;
1738     for (Node<K,V> e = f, pred = null;; ++binCount) {
1739     K ek;
1740     if (e.hash == h &&
1741     ((ek = e.key) == key ||
1742     (ek != null && key.equals(ek)))) {
1743     val = remappingFunction.apply(key, e.val);
1744     if (val != null)
1745     e.val = val;
1746     else {
1747     delta = -1;
1748     Node<K,V> en = e.next;
1749     if (pred != null)
1750     pred.next = en;
1751     else
1752     setTabAt(tab, i, en);
1753     }
1754     break;
1755     }
1756     pred = e;
1757     if ((e = e.next) == null)
1758     break;
1759     }
1760     }
1761     else if (f instanceof TreeBin) {
1762     binCount = 2;
1763     TreeBin<K,V> t = (TreeBin<K,V>)f;
1764     TreeNode<K,V> r, p;
1765     if ((r = t.root) != null &&
1766     (p = r.findTreeNode(h, key, null)) != null) {
1767     val = remappingFunction.apply(key, p.val);
1768 dl 1.27 if (val != null)
1769 dl 1.102 p.val = val;
1770 dl 1.41 else {
1771     delta = -1;
1772 dl 1.102 if (t.removeTreeNode(p))
1773     setTabAt(tab, i, untreeify(t.first));
1774 dl 1.1 }
1775     }
1776     }
1777     }
1778     }
1779 dl 1.102 if (binCount != 0)
1780 dl 1.10 break;
1781 dl 1.1 }
1782 dl 1.10 }
1783 dl 1.82 if (delta != 0)
1784 dl 1.102 addCount((long)delta, binCount);
1785 dl 1.84 return val;
1786 dl 1.1 }
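    /*
     * Usage sketch (hypothetical names; BiFun assumed in scope): atomically
     * decrement a reference count and, by returning null, remove the mapping
     * once it reaches zero; absent keys are left untouched.
     *
     *   ConcurrentHashMapV8<String,Integer> refCounts =
     *       new ConcurrentHashMapV8<String,Integer>();
     *   refCounts.computeIfPresent("res", new BiFun<String,Integer,Integer>() {
     *       public Integer apply(String k, Integer v) {
     *           return (v > 1) ? Integer.valueOf(v - 1) : null; // null removes
     *       }
     *   });
     */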
1787    
1788 dl 1.102 /**
1789     * Attempts to compute a mapping for the specified key and its
1790     * current mapped value (or {@code null} if there is no current
1791     * mapping). The entire method invocation is performed atomically.
1792     * Some attempted update operations on this map by other threads
1793     * may be blocked while computation is in progress, so the
1794     * computation should be short and simple, and must not attempt to
1795     * update any other mappings of this Map.
1796     *
1797     * @param key key with which the specified value is to be associated
1798     * @param remappingFunction the function to compute a value
1799     * @return the new value associated with the specified key, or null if none
1800     * @throws NullPointerException if the specified key or remappingFunction
1801     * is null
1802     * @throws IllegalStateException if the computation detectably
1803     * attempts a recursive update to this map that would
1804     * otherwise never complete
1805     * @throws RuntimeException or Error if the remappingFunction does so,
1806     * in which case the mapping is unchanged
1807     */
1808     public V compute(K key,
1809     BiFun<? super K, ? super V, ? extends V> remappingFunction) {
1810     if (key == null || remappingFunction == null)
1811 dl 1.82 throw new NullPointerException();
1812 dl 1.102 int h = spread(key.hashCode());
1813 dl 1.84 V val = null;
1814 dl 1.52 int delta = 0;
1815 dl 1.102 int binCount = 0;
1816     for (Node<K,V>[] tab = table;;) {
1817     Node<K,V> f; int n, i, fh;
1818     if (tab == null || (n = tab.length) == 0)
1819 dl 1.52 tab = initTable();
1820 dl 1.102 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1821     Node<K,V> r = new ReservationNode<K,V>();
1822     synchronized (r) {
1823     if (casTabAt(tab, i, null, r)) {
1824     binCount = 1;
1825     Node<K,V> node = null;
1826     try {
1827     if ((val = remappingFunction.apply(key, null)) != null) {
1828     delta = 1;
1829     node = new Node<K,V>(h, key, val, null);
1830     }
1831     } finally {
1832     setTabAt(tab, i, node);
1833     }
1834     }
1835     }
1836     if (binCount != 0)
1837 dl 1.52 break;
1838     }
1839 dl 1.102 else if ((fh = f.hash) == MOVED)
1840     tab = helpTransfer(tab, f);
1841     else {
1842     synchronized (f) {
1843     if (tabAt(tab, i) == f) {
1844     if (fh >= 0) {
1845     binCount = 1;
1846     for (Node<K,V> e = f, pred = null;; ++binCount) {
1847     K ek;
1848     if (e.hash == h &&
1849     ((ek = e.key) == key ||
1850     (ek != null && key.equals(ek)))) {
1851     val = remappingFunction.apply(key, e.val);
1852     if (val != null)
1853     e.val = val;
1854     else {
1855     delta = -1;
1856     Node<K,V> en = e.next;
1857     if (pred != null)
1858     pred.next = en;
1859     else
1860     setTabAt(tab, i, en);
1861     }
1862     break;
1863     }
1864     pred = e;
1865     if ((e = e.next) == null) {
1866     val = remappingFunction.apply(key, null);
1867     if (val != null) {
1868     delta = 1;
1869     pred.next =
1870     new Node<K,V>(h, key, val, null);
1871     }
1872     break;
1873     }
1874     }
1875     }
1876     else if (f instanceof TreeBin) {
1877     binCount = 1;
1878     TreeBin<K,V> t = (TreeBin<K,V>)f;
1879     TreeNode<K,V> r, p;
1880     if ((r = t.root) != null)
1881     p = r.findTreeNode(h, key, null);
1882     else
1883     p = null;
1884     V pv = (p == null) ? null : p.val;
1885     val = remappingFunction.apply(key, pv);
1886 dl 1.52 if (val != null) {
1887     if (p != null)
1888     p.val = val;
1889     else {
1890     delta = 1;
1891 dl 1.102 t.putTreeVal(h, key, val);
1892 dl 1.52 }
1893     }
1894     else if (p != null) {
1895     delta = -1;
1896 dl 1.102 if (t.removeTreeNode(p))
1897     setTabAt(tab, i, untreeify(t.first));
1898 dl 1.52 }
1899     }
1900     }
1901     }
1902 dl 1.102 if (binCount != 0) {
1903     if (binCount >= TREEIFY_THRESHOLD)
1904     treeifyBin(tab, i);
1905     break;
1906 dl 1.52 }
1907     }
1908     }
1909 dl 1.82 if (delta != 0)
1910 dl 1.102 addCount((long)delta, binCount);
1911 dl 1.84 return val;
1912 dl 1.52 }
1913    
1914 dl 1.102 /**
1915     * If the specified key is not already associated with a
1916     * (non-null) value, associates it with the given value.
1917     * Otherwise, replaces the value with the results of the given
1918     * remapping function, or removes if {@code null}. The entire
1919     * method invocation is performed atomically. Some attempted
1920     * update operations on this map by other threads may be blocked
1921     * while computation is in progress, so the computation should be
1922     * short and simple, and must not attempt to update any other
1923     * mappings of this Map.
1924     *
1925     * @param key key with which the specified value is to be associated
1926     * @param value the value to use if absent
1927     * @param remappingFunction the function to recompute a value if present
1928     * @return the new value associated with the specified key, or null if none
1929     * @throws NullPointerException if the specified key or the
1930     * remappingFunction is null
1931     * @throws RuntimeException or Error if the remappingFunction does so,
1932     * in which case the mapping is unchanged
1933     */
1934     public V merge(K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
1935     if (key == null || value == null || remappingFunction == null)
1936     throw new NullPointerException();
1937     int h = spread(key.hashCode());
1938     V val = null;
1939     int delta = 0;
1940     int binCount = 0;
1941     for (Node<K,V>[] tab = table;;) {
1942     Node<K,V> f; int n, i, fh;
1943     if (tab == null || (n = tab.length) == 0)
1944     tab = initTable();
1945     else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1946     if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) {
1947     delta = 1;
1948     val = value;
1949 dl 1.27 break;
1950     }
1951 dl 1.102 }
1952     else if ((fh = f.hash) == MOVED)
1953     tab = helpTransfer(tab, f);
1954     else {
1955     synchronized (f) {
1956     if (tabAt(tab, i) == f) {
1957     if (fh >= 0) {
1958     binCount = 1;
1959     for (Node<K,V> e = f, pred = null;; ++binCount) {
1960     K ek;
1961     if (e.hash == h &&
1962     ((ek = e.key) == key ||
1963     (ek != null && key.equals(ek)))) {
1964     val = remappingFunction.apply(e.val, value);
1965     if (val != null)
1966     e.val = val;
1967 dl 1.38 else {
1968 dl 1.102 delta = -1;
1969     Node<K,V> en = e.next;
1970     if (pred != null)
1971     pred.next = en;
1972     else
1973     setTabAt(tab, i, en);
1974 dl 1.38 }
1975 dl 1.102 break;
1976     }
1977     pred = e;
1978     if ((e = e.next) == null) {
1979     delta = 1;
1980     val = value;
1981     pred.next =
1982     new Node<K,V>(h, key, val, null);
1983     break;
1984 dl 1.38 }
1985     }
1986     }
1987 dl 1.102 else if (f instanceof TreeBin) {
1988     binCount = 2;
1989     TreeBin<K,V> t = (TreeBin<K,V>)f;
1990     TreeNode<K,V> r = t.root;
1991     TreeNode<K,V> p = (r == null) ? null :
1992     r.findTreeNode(h, key, null);
1993     val = (p == null) ? value :
1994     remappingFunction.apply(p.val, value);
1995     if (val != null) {
1996     if (p != null)
1997     p.val = val;
1998     else {
1999     delta = 1;
2000     t.putTreeVal(h, key, val);
2001 dl 1.27 }
2002     }
2003 dl 1.102 else if (p != null) {
2004     delta = -1;
2005     if (t.removeTreeNode(p))
2006     setTabAt(tab, i, untreeify(t.first));
2007 dl 1.90 }
2008 dl 1.24 }
2009     }
2010 dl 1.1 }
2011 dl 1.102 if (binCount != 0) {
2012     if (binCount >= TREEIFY_THRESHOLD)
2013     treeifyBin(tab, i);
2014     break;
2015     }
2016 dl 1.1 }
2017     }
2018 dl 1.102 if (delta != 0)
2019     addCount((long)delta, binCount);
2020     return val;
2021 dl 1.1 }
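    /*
     * Usage sketch (hypothetical names; BiFun assumed in scope): per-key
     * counting. The first merge for a key installs 1; later merges combine it
     * with the existing count.
     *
     *   ConcurrentHashMapV8<String,Integer> freq =
     *       new ConcurrentHashMapV8<String,Integer>();
     *   freq.merge("the", 1, new BiFun<Integer,Integer,Integer>() {
     *       public Integer apply(Integer oldCount, Integer one) {
     *           return oldCount + one;
     *       }
     *   });
     */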
2022    
2023 dl 1.102 // Hashtable legacy methods
2024    
2025 dl 1.82 /**
2026 dl 1.102 * Legacy method testing if some key maps into the specified value
2027     * in this table. This method is identical in functionality to
2028     * {@link #containsValue(Object)}, and exists solely to ensure
2029     * full compatibility with class {@link java.util.Hashtable},
2030     * which supported this method prior to introduction of the
2031     * Java Collections framework.
2032     *
2033     * @param value a value to search for
2034     * @return {@code true} if and only if some key maps to the
2035     * {@code value} argument in this table as
2036     * determined by the {@code equals} method;
2037     * {@code false} otherwise
2038     * @throws NullPointerException if the specified value is null
2039     */
2040     @Deprecated public boolean contains(Object value) {
2041     return containsValue(value);
2042     }
2043    
2044     /**
2045     * Returns an enumeration of the keys in this table.
2046     *
2047     * @return an enumeration of the keys in this table
2048     * @see #keySet()
2049     */
2050     public Enumeration<K> keys() {
2051     Node<K,V>[] t;
2052     int f = (t = table) == null ? 0 : t.length;
2053     return new KeyIterator<K,V>(t, f, 0, f, this);
2054     }
2055    
2056     /**
2057     * Returns an enumeration of the values in this table.
2058     *
2059     * @return an enumeration of the values in this table
2060     * @see #values()
2061     */
2062     public Enumeration<V> elements() {
2063     Node<K,V>[] t;
2064     int f = (t = table) == null ? 0 : t.length;
2065     return new ValueIterator<K,V>(t, f, 0, f, this);
2066     }
2067    
2068     // ConcurrentHashMapV8-only methods
2069    
2070     /**
2071     * Returns the number of mappings. This method should be used
2072     * instead of {@link #size} because a ConcurrentHashMapV8 may
2073     * contain more mappings than can be represented as an int. The
2074     * value returned is an estimate; the actual count may differ if
2075     * there are concurrent insertions or removals.
2076     *
2077     * @return the number of mappings
2078     * @since 1.8
2079     */
2080     public long mappingCount() {
2081     long n = sumCount();
2082     return (n < 0L) ? 0L : n; // ignore transient negative values
2083     }
2084    
2085     /**
2086     * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2087     * from the given type to {@code Boolean.TRUE}.
2088     *
2089     * @return the new set
2090     * @since 1.8
2091     */
2092     public static <K> KeySetView<K,Boolean> newKeySet() {
2093     return new KeySetView<K,Boolean>
2094     (new ConcurrentHashMapV8<K,Boolean>(), Boolean.TRUE);
2095     }
2096    
2097     /**
2098     * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2099     * from the given type to {@code Boolean.TRUE}.
2100     *
2101     * @param initialCapacity The implementation performs internal
2102     * sizing to accommodate this many elements.
2103     * @throws IllegalArgumentException if the initial capacity of
2104     * elements is negative
2105     * @return the new set
2106     * @since 1.8
2107     */
2108     public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2109     return new KeySetView<K,Boolean>
2110     (new ConcurrentHashMapV8<K,Boolean>(initialCapacity), Boolean.TRUE);
2111     }
2112    
2113     /**
2114     * Returns a {@link Set} view of the keys in this map, using the
2115     * given common mapped value for any additions (i.e., {@link
2116     * Collection#add} and {@link Collection#addAll(Collection)}).
2117     * This is of course only appropriate if it is acceptable to use
2118     * the same value for all additions from this view.
2119     *
2120     * @param mappedValue the mapped value to use for any additions
2121     * @return the set view
2122     * @throws NullPointerException if the mappedValue is null
2123 dl 1.82 */
2124 dl 1.102 public KeySetView<K,V> keySet(V mappedValue) {
2125     if (mappedValue == null)
2126     throw new NullPointerException();
2127     return new KeySetView<K,V>(this, mappedValue);
2128     }
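    /*
     * Usage sketch (hypothetical names): a concurrent Set can be created from
     * scratch with newKeySet(), or obtained as an add-capable view of an
     * existing map with keySet(mappedValue); additions through the view use
     * the given common mapped value.
     *
     *   KeySetView<String,Boolean> seen = ConcurrentHashMapV8.newKeySet();
     *   if (seen.add("id-42")) {
     *       // first time this id has been observed
     *   }
     *
     *   ConcurrentHashMapV8<String,Integer> scores =
     *       new ConcurrentHashMapV8<String,Integer>();
     *   KeySetView<String,Integer> names = scores.keySet(Integer.valueOf(0));
     *   names.add("carol");     // ensures a mapping for "carol", added with value 0 if absent
     */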
2129    
2130     /* ---------------- Special Nodes -------------- */
2131    
2132     /**
2133     * A node inserted at head of bins during transfer operations.
2134     */
2135     static final class ForwardingNode<K,V> extends Node<K,V> {
2136     final Node<K,V>[] nextTable;
2137     ForwardingNode(Node<K,V>[] tab) {
2138     super(MOVED, null, null, null);
2139     this.nextTable = tab;
2140     }
2141    
2142     Node<K,V> find(int h, Object k) {
2143     Node<K,V> e; int n;
2144     Node<K,V>[] tab = nextTable;
2145     if (k != null && tab != null && (n = tab.length) > 0 &&
2146     (e = tabAt(tab, (n - 1) & h)) != null) {
2147     do {
2148     int eh; K ek;
2149     if ((eh = e.hash) == h &&
2150     ((ek = e.key) == k || (ek != null && k.equals(ek))))
2151     return e;
2152     if (eh < 0)
2153     return e.find(h, k);
2154     } while ((e = e.next) != null);
2155 dl 1.82 }
2156 dl 1.102 return null;
2157 dl 1.82 }
2158     }
2159    
2160 dl 1.24 /**
2161 dl 1.102 * A place-holder node used in computeIfAbsent and compute
2162 dl 1.24 */
2163 dl 1.102 static final class ReservationNode<K,V> extends Node<K,V> {
2164     ReservationNode() {
2165     super(RESERVED, null, null, null);
2166     }
2167    
2168     Node<K,V> find(int h, Object k) {
2169     return null;
2170     }
2171 dl 1.24 }
2172    
2173 dl 1.102 /* ---------------- Table Initialization and Resizing -------------- */
2174    
2175 dl 1.24 /**
2176     * Initializes table, using the size recorded in sizeCtl.
2177     */
2178 dl 1.102 private final Node<K,V>[] initTable() {
2179     Node<K,V>[] tab; int sc;
2180     while ((tab = table) == null || tab.length == 0) {
2181 dl 1.24 if ((sc = sizeCtl) < 0)
2182     Thread.yield(); // lost initialization race; just spin
2183 dl 1.82 else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
2184 dl 1.24 try {
2185 dl 1.102 if ((tab = table) == null || tab.length == 0) {
2186 dl 1.24 int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2187 dl 1.102 @SuppressWarnings({"rawtypes","unchecked"})
2188     Node<K,V>[] nt = (Node<K,V>[])new Node[n];
2189     table = tab = nt;
2190 dl 1.27 sc = n - (n >>> 2);
2191 dl 1.24 }
2192     } finally {
2193     sizeCtl = sc;
2194     }
2195     break;
2196     }
2197     }
2198     return tab;
2199     }
2200    
2201     /**
2202 dl 1.82 * Adds to count, and if table is too small and not already
2203     * resizing, initiates transfer. If already resizing, helps
2204     * perform transfer if work is available. Rechecks occupancy
2205     * after a transfer to see if another resize is already needed
2206     * because resizings are lagging additions.
2207     *
2208     * @param x the count to add
2209     * @param check if <0, don't check resize, if <= 1 only check if uncontended
2210     */
2211     private final void addCount(long x, int check) {
2212     CounterCell[] as; long b, s;
2213     if ((as = counterCells) != null ||
2214     !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
2215     CounterHashCode hc; CounterCell a; long v; int m;
2216     boolean uncontended = true;
2217     if ((hc = threadCounterHashCode.get()) == null ||
2218     as == null || (m = as.length - 1) < 0 ||
2219     (a = as[m & hc.code]) == null ||
2220     !(uncontended =
2221     U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
2222     fullAddCount(x, hc, uncontended);
2223     return;
2224     }
2225     if (check <= 1)
2226     return;
2227     s = sumCount();
2228     }
2229     if (check >= 0) {
2230 dl 1.102 Node<K,V>[] tab, nt; int sc;
2231 dl 1.82 while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
2232     tab.length < MAXIMUM_CAPACITY) {
2233     if (sc < 0) {
2234     if (sc == -1 || transferIndex <= transferOrigin ||
2235     (nt = nextTable) == null)
2236     break;
2237     if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
2238     transfer(tab, nt);
2239 dl 1.24 }
2240 dl 1.82 else if (U.compareAndSwapInt(this, SIZECTL, sc, -2))
2241     transfer(tab, null);
2242     s = sumCount();
2243 dl 1.24 }
2244     }
2245     }
2246    
2247 dl 1.27 /**
2248 dl 1.102 * Helps transfer if a resize is in progress.
2249     */
2250     final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
2251     Node<K,V>[] nextTab; int sc;
2252     if ((f instanceof ForwardingNode) &&
2253     (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
2254     if (nextTab == nextTable && tab == table &&
2255     transferIndex > transferOrigin && (sc = sizeCtl) < -1 &&
2256     U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
2257     transfer(tab, nextTab);
2258     return nextTab;
2259     }
2260     return table;
2261     }
2262    
2263     /**
2264 dl 1.27 * Tries to presize table to accommodate the given number of elements.
2265     *
2266     * @param size number of elements (doesn't need to be perfectly accurate)
2267     */
2268 dl 1.102 private final void tryPresize(int size) {
2269 dl 1.27 int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2270     tableSizeFor(size + (size >>> 1) + 1);
2271     int sc;
2272     while ((sc = sizeCtl) >= 0) {
2273 dl 1.102 Node<K,V>[] tab = table; int n;
2274 dl 1.27 if (tab == null || (n = tab.length) == 0) {
2275 jsr166 1.30 n = (sc > c) ? sc : c;
2276 dl 1.82 if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
2277 dl 1.27 try {
2278     if (table == tab) {
2279 dl 1.102 @SuppressWarnings({"rawtypes","unchecked"})
2280     Node<K,V>[] nt = (Node<K,V>[])new Node[n];
2281     table = nt;
2282 dl 1.27 sc = n - (n >>> 2);
2283     }
2284     } finally {
2285     sizeCtl = sc;
2286     }
2287     }
2288     }
2289     else if (c <= sc || n >= MAXIMUM_CAPACITY)
2290     break;
2291 dl 1.82 else if (tab == table &&
2292     U.compareAndSwapInt(this, SIZECTL, sc, -2))
2293     transfer(tab, null);
2294 dl 1.27 }
2295     }
2296    
2297 jsr166 1.92 /**
2298 dl 1.24 * Moves and/or copies the nodes in each bin to new table. See
2299     * above for explanation.
2300     */
2301 dl 1.102 private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
2302 dl 1.82 int n = tab.length, stride;
2303     if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
2304     stride = MIN_TRANSFER_STRIDE; // subdivide range
2305     if (nextTab == null) { // initiating
2306     try {
2307 dl 1.102 @SuppressWarnings({"rawtypes","unchecked"})
2308     Node<K,V>[] nt = (Node<K,V>[])new Node[n << 1];
2309     nextTab = nt;
2310 jsr166 1.83 } catch (Throwable ex) { // try to cope with OOME
2311 dl 1.82 sizeCtl = Integer.MAX_VALUE;
2312     return;
2313     }
2314     nextTable = nextTab;
2315     transferOrigin = n;
2316     transferIndex = n;
2317 dl 1.102 ForwardingNode<K,V> rev = new ForwardingNode<K,V>(tab);
2318 dl 1.82 for (int k = n; k > 0;) { // progressively reveal ready slots
2319 jsr166 1.83 int nextk = (k > stride) ? k - stride : 0;
2320 dl 1.82 for (int m = nextk; m < k; ++m)
2321     nextTab[m] = rev;
2322     for (int m = n + nextk; m < n + k; ++m)
2323     nextTab[m] = rev;
2324     U.putOrderedInt(this, TRANSFERORIGIN, k = nextk);
2325     }
2326     }
2327     int nextn = nextTab.length;
2328 dl 1.102 ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
2329 dl 1.82 boolean advance = true;
2330     for (int i = 0, bound = 0;;) {
2331 dl 1.102 int nextIndex, nextBound, fh; Node<K,V> f;
2332 dl 1.82 while (advance) {
2333     if (--i >= bound)
2334     advance = false;
2335     else if ((nextIndex = transferIndex) <= transferOrigin) {
2336     i = -1;
2337     advance = false;
2338     }
2339     else if (U.compareAndSwapInt
2340     (this, TRANSFERINDEX, nextIndex,
2341 jsr166 1.83 nextBound = (nextIndex > stride ?
2342 dl 1.82 nextIndex - stride : 0))) {
2343     bound = nextBound;
2344     i = nextIndex - 1;
2345     advance = false;
2346     }
2347     }
2348     if (i < 0 || i >= n || i + n >= nextn) {
2349     for (int sc;;) {
2350     if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) {
2351     if (sc == -1) {
2352     nextTable = null;
2353     table = nextTab;
2354     sizeCtl = (n << 1) - (n >>> 1);
2355     }
2356     return;
2357     }
2358     }
2359     }
2360     else if ((f = tabAt(tab, i)) == null) {
2361     if (casTabAt(tab, i, null, fwd)) {
2362 dl 1.24 setTabAt(nextTab, i, null);
2363     setTabAt(nextTab, i + n, null);
2364 dl 1.82 advance = true;
2365 dl 1.24 }
2366     }
2367 dl 1.102 else if ((fh = f.hash) == MOVED)
2368     advance = true; // already processed
2369     else {
2370 jsr166 1.83 synchronized (f) {
2371 dl 1.82 if (tabAt(tab, i) == f) {
2372 dl 1.102 Node<K,V> ln, hn;
2373     if (fh >= 0) {
2374     int runBit = fh & n;
2375     Node<K,V> lastRun = f;
2376     for (Node<K,V> p = f.next; p != null; p = p.next) {
2377     int b = p.hash & n;
2378     if (b != runBit) {
2379     runBit = b;
2380     lastRun = p;
2381     }
2382 dl 1.82 }
2383 dl 1.102 if (runBit == 0) {
2384     ln = lastRun;
2385     hn = null;
2386 dl 1.82 }
2387     else {
2388 dl 1.102 hn = lastRun;
2389     ln = null;
2390     }
2391     for (Node<K,V> p = f; p != lastRun; p = p.next) {
2392     int ph = p.hash; K pk = p.key; V pv = p.val;
2393     if ((ph & n) == 0)
2394     ln = new Node<K,V>(ph, pk, pv, ln);
2395     else
2396     hn = new Node<K,V>(ph, pk, pv, hn);
2397 dl 1.82 }
2398     }
2399 dl 1.102 else if (f instanceof TreeBin) {
2400     TreeBin<K,V> t = (TreeBin<K,V>)f;
2401     TreeNode<K,V> lo = null, loTail = null;
2402     TreeNode<K,V> hi = null, hiTail = null;
2403     int lc = 0, hc = 0;
2404     for (Node<K,V> e = t.first; e != null; e = e.next) {
2405     int h = e.hash;
2406     TreeNode<K,V> p = new TreeNode<K,V>
2407     (h, e.key, e.val, null, null);
2408     if ((h & n) == 0) {
2409     if ((p.prev = loTail) == null)
2410     lo = p;
2411     else
2412     loTail.next = p;
2413     loTail = p;
2414     ++lc;
2415     }
2416     else {
2417     if ((p.prev = hiTail) == null)
2418     hi = p;
2419     else
2420     hiTail.next = p;
2421     hiTail = p;
2422     ++hc;
2423     }
2424     }
2425     ln = (lc <= UNTREEIFY_THRESHOLD ? untreeify(lo) :
2426     (hc != 0) ? new TreeBin<K,V>(lo) : t);
2427     hn = (hc <= UNTREEIFY_THRESHOLD ? untreeify(hi) :
2428     (lc != 0) ? new TreeBin<K,V>(hi) : t);
2429 dl 1.82 }
2430     else
2431 dl 1.102 ln = hn = null;
2432 dl 1.82 setTabAt(nextTab, i, ln);
2433     setTabAt(nextTab, i + n, hn);
2434 dl 1.24 setTabAt(tab, i, fwd);
2435 dl 1.82 advance = true;
2436 dl 1.24 }
2437     }
2438     }
2439     }
2440     }
2441    
2442 dl 1.102 /* ---------------- Conversion from/to TreeBins -------------- */
2443 dl 1.82
2444 dl 1.102 /**
2445     * Replaces all linked nodes in bin at given index unless table is
2446     * too small, in which case resizes instead.
2447     */
2448     private final void treeifyBin(Node<K,V>[] tab, int index) {
2449     Node<K,V> b; int n, sc;
2450     if (tab != null) {
2451     if ((n = tab.length) < MIN_TREEIFY_CAPACITY) {
2452     if (tab == table && (sc = sizeCtl) >= 0 &&
2453     U.compareAndSwapInt(this, SIZECTL, sc, -2))
2454     transfer(tab, null);
2455 dl 1.38 }
2456 dl 1.102 else if ((b = tabAt(tab, index)) != null) {
2457     synchronized (b) {
2458     if (tabAt(tab, index) == b) {
2459     TreeNode<K,V> hd = null, tl = null;
2460     for (Node<K,V> e = b; e != null; e = e.next) {
2461     TreeNode<K,V> p =
2462     new TreeNode<K,V>(e.hash, e.key, e.val,
2463     null, null);
2464     if ((p.prev = tl) == null)
2465     hd = p;
2466     else
2467     tl.next = p;
2468     tl = p;
2469 dl 1.38 }
2470 dl 1.102 setTabAt(tab, index, new TreeBin<K,V>(hd));
2471 dl 1.38 }
2472     }
2473 dl 1.82 }
2474 dl 1.27 }
2475     }
2476    
2477 dl 1.102 /**
2478     * Returns a list on non-TreeNodes replacing those in given list
2479     * Returns a list of non-TreeNodes replacing those in given list.
2480     static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2481     Node<K,V> hd = null, tl = null;
2482     for (Node<K,V> q = b; q != null; q = q.next) {
2483     Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
2484     if (tl == null)
2485     hd = p;
2486     else
2487     tl.next = p;
2488     tl = p;
2489     }
2490     return hd;
2491     }
2492    
2493     /* ---------------- TreeNodes -------------- */
2494 dl 1.14
2495 dl 1.1 /**
2496 dl 1.102 * Nodes for use in TreeBins
2497 dl 1.14 */
2498 dl 1.102 static final class TreeNode<K,V> extends Node<K,V> {
2499     TreeNode<K,V> parent; // red-black tree links
2500     TreeNode<K,V> left;
2501     TreeNode<K,V> right;
2502     TreeNode<K,V> prev; // needed to unlink next upon deletion
2503     boolean red;
2504    
2505     TreeNode(int hash, K key, V val, Node<K,V> next,
2506     TreeNode<K,V> parent) {
2507     super(hash, key, val, next);
2508     this.parent = parent;
2509     }
2510    
2511     Node<K,V> find(int h, Object k) {
2512     return findTreeNode(h, k, null);
2513     }
2514    
2515     /**
2516     * Returns the TreeNode (or null if not found) for the given key
2517     * starting at given root.
2518     */
2519     final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
2520     if (k != null) {
2521     TreeNode<K,V> p = this;
2522     do {
2523     int ph, dir; K pk; TreeNode<K,V> q;
2524     TreeNode<K,V> pl = p.left, pr = p.right;
2525     if ((ph = p.hash) > h)
2526     p = pl;
2527     else if (ph < h)
2528     p = pr;
2529     else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2530     return p;
2531     else if (pl == null && pr == null)
2532     break;
2533     else if ((kc != null ||
2534     (kc = comparableClassFor(k)) != null) &&
2535     (dir = compareComparables(kc, k, pk)) != 0)
2536     p = (dir < 0) ? pl : pr;
2537     else if (pl == null)
2538     p = pr;
2539     else if (pr == null ||
2540     (q = pr.findTreeNode(h, k, kc)) == null)
2541     p = pl;
2542     else
2543     return q;
2544     } while (p != null);
2545     }
2546     return null;
2547     }
2548     }
2549    
2550     /* ---------------- TreeBins -------------- */
2551    
2552     /**
2553     * TreeNodes used at the heads of bins. TreeBins do not hold user
2554     * keys or values, but instead point to list of TreeNodes and
2555     * their root. They also maintain a parasitic read-write lock
2556     * forcing writers (who hold bin lock) to wait for readers (who do
2557     * not) to complete before tree restructuring operations.
2558     */
2559     static final class TreeBin<K,V> extends Node<K,V> {
2560     TreeNode<K,V> root;
2561     volatile TreeNode<K,V> first;
2562     volatile Thread waiter;
2563     volatile int lockState;
2564     // values for lockState
2565     static final int WRITER = 1; // set while holding write lock
2566     static final int WAITER = 2; // set when waiting for write lock
2567     static final int READER = 4; // increment value for setting read lock
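        // Illustrative lockState values (a sketch of the encoding above):
        // 0 = unlocked; WRITER (1) = a writer holds the lock; 4*k = k readers
        // are active; 4*k + WAITER = readers are active and a writer is
        // waiting, so the last reader to finish unparks it (see find below).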
2568    
2569     /**
2570     * Creates bin with initial set of nodes headed by b.
2571     */
2572     TreeBin(TreeNode<K,V> b) {
2573     super(TREEBIN, null, null, null);
2574     this.first = b;
2575     TreeNode<K,V> r = null;
2576     for (TreeNode<K,V> x = b, next; x != null; x = next) {
2577     next = (TreeNode<K,V>)x.next;
2578     x.left = x.right = null;
2579     if (r == null) {
2580     x.parent = null;
2581     x.red = false;
2582     r = x;
2583     }
2584     else {
2585     Object key = x.key;
2586     int hash = x.hash;
2587     Class<?> kc = null;
2588     for (TreeNode<K,V> p = r;;) {
2589     int dir, ph;
2590     if ((ph = p.hash) > hash)
2591     dir = -1;
2592     else if (ph < hash)
2593     dir = 1;
2594     else if ((kc != null ||
2595     (kc = comparableClassFor(key)) != null))
2596     dir = compareComparables(kc, key, p.key);
2597     else
2598     dir = 0;
2599     TreeNode<K,V> xp = p;
2600     if ((p = (dir <= 0) ? p.left : p.right) == null) {
2601     x.parent = xp;
2602     if (dir <= 0)
2603     xp.left = x;
2604     else
2605     xp.right = x;
2606     r = balanceInsertion(r, x);
2607     break;
2608     }
2609     }
2610     }
2611     }
2612     this.root = r;
2613     }
2614    
2615     /**
2616     * Acquires write lock for tree restructuring
2617     */
2618     private final void lockRoot() {
2619     if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
2620     contendedLock(); // offload to separate method
2621     }
2622    
2623     /**
2624     * Releases write lock for tree restructuring
2625     */
2626     private final void unlockRoot() {
2627     lockState = 0;
2628     }
2629 dl 1.14
2630 dl 1.102 /**
2631     * Possibly blocks awaiting root lock
2632     */
2633     private final void contendedLock() {
2634     boolean waiting = false;
2635     for (int s;;) {
2636     if (((s = lockState) & ~WAITER) == 0) {
2637     if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
2638     if (waiting)
2639     waiter = null;
2640     return;
2641     }
2642     }
2643     else if ((s & WAITER) == 0) {
2644     if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
2645     waiting = true;
2646     waiter = Thread.currentThread();
2647     }
2648     }
2649     else if (waiting)
2650     LockSupport.park(this);
2651     }
2652 dl 1.14 }
2653    
2654 dl 1.102 /**
2655     * Returns matching node or null if none. Tries to search
2656     * using tree comparisons from root, but continues linear
2657     * search when lock not available.
2658     */
2659     final Node<K,V> find(int h, Object k) {
2660     if (k != null) {
2661     for (Node<K,V> e = first; e != null; e = e.next) {
2662     int s; K ek;
2663     if (((s = lockState) & (WAITER|WRITER)) != 0) {
2664     if (e.hash == h &&
2665     ((ek = e.key) == k || (ek != null && k.equals(ek))))
2666     return e;
2667     }
2668     else if (U.compareAndSwapInt(this, LOCKSTATE, s,
2669     s + READER)) {
2670     TreeNode<K,V> r, p;
2671     try {
2672     p = ((r = root) == null ? null :
2673     r.findTreeNode(h, k, null));
2674     } finally {
2675     Thread w;
2676     int ls;
2677     do {} while (!U.compareAndSwapInt
2678     (this, LOCKSTATE,
2679     ls = lockState, ls - READER));
2680     if (ls == (READER|WAITER) && (w = waiter) != null)
2681     LockSupport.unpark(w);
2682     }
2683     return p;
2684     }
2685     }
2686 dl 1.79 }
2687 dl 1.102 return null;
2688 dl 1.41 }
2689    
2690     /**
2691 dl 1.102 * Finds or adds a node.
2692     * @return null if added
2693 dl 1.41 */
2694 dl 1.102 final TreeNode<K,V> putTreeVal(int h, K k, V v) {
2695     Class<?> kc = null;
2696     for (TreeNode<K,V> p = root;;) {
2697     int dir, ph; K pk; TreeNode<K,V> q, pr;
2698     if (p == null) {
2699     first = root = new TreeNode<K,V>(h, k, v, null, null);
2700     break;
2701     }
2702     else if ((ph = p.hash) > h)
2703     dir = -1;
2704     else if (ph < h)
2705     dir = 1;
2706     else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2707     return p;
2708     else if ((kc == null &&
2709     (kc = comparableClassFor(k)) == null) ||
2710     (dir = compareComparables(kc, k, pk)) == 0) {
2711     if (p.left == null)
2712     dir = 1;
2713     else if ((pr = p.right) == null ||
2714     (q = pr.findTreeNode(h, k, kc)) == null)
2715     dir = -1;
2716     else
2717     return q;
2718     }
2719     TreeNode<K,V> xp = p;
2720     if ((p = (dir < 0) ? p.left : p.right) == null) {
2721     TreeNode<K,V> x, f = first;
2722     first = x = new TreeNode<K,V>(h, k, v, f, xp);
2723     if (f != null)
2724     f.prev = x;
2725     if (dir < 0)
2726     xp.left = x;
2727 dl 1.63 else
2728 dl 1.102 xp.right = x;
2729     if (!xp.red)
2730     x.red = true;
2731     else {
2732     lockRoot();
2733     try {
2734     root = balanceInsertion(root, x);
2735     } finally {
2736     unlockRoot();
2737 dl 1.38 }
2738 dl 1.102 }
2739     break;
2740 dl 1.1 }
2741 dl 1.102 }
2742     assert checkInvariants(root);
2743     return null;
2744 dl 1.1 }
2745 dl 1.41
2746 dl 1.102 /**
2747     * Removes the given node, that must be present before this
2748     * call. This is messier than typical red-black deletion code
2749     * because we cannot swap the contents of an interior node
2750     * with a leaf successor that is pinned by "next" pointers
2751     * that are accessible independently of lock. So instead we
2752     * swap the tree linkages.
2753     *
2754     * @return true if now too small so should be untreeified.
2755     */
2756     final boolean removeTreeNode(TreeNode<K,V> p) {
2757     TreeNode<K,V> next = (TreeNode<K,V>)p.next;
2758     TreeNode<K,V> pred = p.prev; // unlink traversal pointers
2759     TreeNode<K,V> r, rl;
2760     if (pred == null)
2761     first = next;
2762     else
2763     pred.next = next;
2764     if (next != null)
2765     next.prev = pred;
2766     if (first == null) {
2767     root = null;
2768     return true;
2769     }
2770     if ((r = root) == null || r.right == null || // too small
2771     (rl = r.left) == null || rl.left == null)
2772     return true;
2773     lockRoot();
2774     try {
2775     TreeNode<K,V> replacement;
2776     TreeNode<K,V> pl = p.left;
2777     TreeNode<K,V> pr = p.right;
2778     if (pl != null && pr != null) {
2779     TreeNode<K,V> s = pr, sl;
2780     while ((sl = s.left) != null) // find successor
2781     s = sl;
2782     boolean c = s.red; s.red = p.red; p.red = c; // swap colors
2783     TreeNode<K,V> sr = s.right;
2784     TreeNode<K,V> pp = p.parent;
2785     if (s == pr) { // p was s's direct parent
2786     p.parent = s;
2787     s.right = p;
2788     }
2789     else {
2790     TreeNode<K,V> sp = s.parent;
2791     if ((p.parent = sp) != null) {
2792     if (s == sp.left)
2793     sp.left = p;
2794     else
2795     sp.right = p;
2796     }
2797     if ((s.right = pr) != null)
2798     pr.parent = s;
2799     }
2800     p.left = null;
2801     if ((p.right = sr) != null)
2802     sr.parent = p;
2803     if ((s.left = pl) != null)
2804     pl.parent = s;
2805     if ((s.parent = pp) == null)
2806     r = s;
2807     else if (p == pp.left)
2808     pp.left = s;
2809     else
2810     pp.right = s;
2811     if (sr != null)
2812     replacement = sr;
2813     else
2814     replacement = p;
2815     }
2816     else if (pl != null)
2817     replacement = pl;
2818     else if (pr != null)
2819     replacement = pr;
2820     else
2821     replacement = p;
2822     if (replacement != p) {
2823     TreeNode<K,V> pp = replacement.parent = p.parent;
2824     if (pp == null)
2825     r = replacement;
2826     else if (p == pp.left)
2827     pp.left = replacement;
2828     else
2829     pp.right = replacement;
2830     p.left = p.right = p.parent = null;
2831     }
2832    
2833     root = (p.red) ? r : balanceDeletion(r, replacement);
2834    
2835     if (p == replacement) { // detach pointers
2836     TreeNode<K,V> pp;
2837     if ((pp = p.parent) != null) {
2838     if (p == pp.left)
2839     pp.left = null;
2840     else if (p == pp.right)
2841     pp.right = null;
2842     p.parent = null;
2843     }
2844     }
2845     } finally {
2846     unlockRoot();
2847     }
2848     assert checkInvariants(root);
2849     return false;
2850 dl 1.41 }
2851    
2852 dl 1.102 /* ------------------------------------------------------------ */
2853     // Red-black tree methods, all adapted from CLR
2854    
2855     static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
2856     TreeNode<K,V> p) {
2857     TreeNode<K,V> r, pp, rl;
2858     if (p != null && (r = p.right) != null) {
2859     if ((rl = p.right = r.left) != null)
2860     rl.parent = p;
2861     if ((pp = r.parent = p.parent) == null)
2862     (root = r).red = false;
2863     else if (pp.left == p)
2864     pp.left = r;
2865     else
2866     pp.right = r;
2867     r.left = p;
2868     p.parent = r;
2869     }
2870     return root;
2871 dl 1.41 }
2872    
2873 dl 1.102 static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
2874     TreeNode<K,V> p) {
2875     TreeNode<K,V> l, pp, lr;
2876     if (p != null && (l = p.left) != null) {
2877     if ((lr = p.left = l.right) != null)
2878     lr.parent = p;
2879     if ((pp = l.parent = p.parent) == null)
2880     (root = l).red = false;
2881     else if (pp.right == p)
2882     pp.right = l;
2883     else
2884     pp.left = l;
2885     l.right = p;
2886     p.parent = l;
2887     }
2888     return root;
2889     }
2890 dl 1.79
2891 dl 1.102 static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
2892     TreeNode<K,V> x) {
2893     x.red = true;
2894     for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
2895     if ((xp = x.parent) == null) {
2896     x.red = false;
2897     return x;
2898     }
2899     else if (!xp.red || (xpp = xp.parent) == null)
2900     return root;
2901     if (xp == (xppl = xpp.left)) {
2902     if ((xppr = xpp.right) != null && xppr.red) {
2903     xppr.red = false;
2904     xp.red = false;
2905     xpp.red = true;
2906     x = xpp;
2907     }
2908     else {
2909     if (x == xp.right) {
2910     root = rotateLeft(root, x = xp);
2911     xpp = (xp = x.parent) == null ? null : xp.parent;
2912     }
2913     if (xp != null) {
2914     xp.red = false;
2915     if (xpp != null) {
2916     xpp.red = true;
2917     root = rotateRight(root, xpp);
2918     }
2919     }
2920     }
2921     }
2922     else {
2923     if (xppl != null && xppl.red) {
2924     xppl.red = false;
2925     xp.red = false;
2926     xpp.red = true;
2927     x = xpp;
2928     }
2929     else {
2930     if (x == xp.left) {
2931     root = rotateRight(root, x = xp);
2932     xpp = (xp = x.parent) == null ? null : xp.parent;
2933     }
2934     if (xp != null) {
2935     xp.red = false;
2936     if (xpp != null) {
2937     xpp.red = true;
2938     root = rotateLeft(root, xpp);
2939     }
2940     }
2941     }
2942     }
2943     }
2944     }
2945 dl 1.79
2946 dl 1.102 static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
2947     TreeNode<K,V> x) {
2948     for (TreeNode<K,V> xp, xpl, xpr;;) {
2949     if (x == null || x == root)
2950     return root;
2951     else if ((xp = x.parent) == null) {
2952     x.red = false;
2953     return x;
2954     }
2955     else if (x.red) {
2956     x.red = false;
2957     return root;
2958     }
2959     else if ((xpl = xp.left) == x) {
2960     if ((xpr = xp.right) != null && xpr.red) {
2961     xpr.red = false;
2962     xp.red = true;
2963     root = rotateLeft(root, xp);
2964     xpr = (xp = x.parent) == null ? null : xp.right;
2965     }
2966     if (xpr == null)
2967     x = xp;
2968     else {
2969     TreeNode<K,V> sl = xpr.left, sr = xpr.right;
2970     if ((sr == null || !sr.red) &&
2971     (sl == null || !sl.red)) {
2972     xpr.red = true;
2973     x = xp;
2974     }
2975     else {
2976     if (sr == null || !sr.red) {
2977     if (sl != null)
2978     sl.red = false;
2979     xpr.red = true;
2980     root = rotateRight(root, xpr);
2981     xpr = (xp = x.parent) == null ?
2982     null : xp.right;
2983     }
2984     if (xpr != null) {
2985     xpr.red = (xp == null) ? false : xp.red;
2986     if ((sr = xpr.right) != null)
2987     sr.red = false;
2988     }
2989     if (xp != null) {
2990     xp.red = false;
2991     root = rotateLeft(root, xp);
2992     }
2993     x = root;
2994     }
2995     }
2996     }
2997     else { // symmetric
2998     if (xpl != null && xpl.red) {
2999     xpl.red = false;
3000     xp.red = true;
3001     root = rotateRight(root, xp);
3002     xpl = (xp = x.parent) == null ? null : xp.left;
3003     }
3004     if (xpl == null)
3005     x = xp;
3006     else {
3007     TreeNode<K,V> sl = xpl.left, sr = xpl.right;
3008     if ((sl == null || !sl.red) &&
3009     (sr == null || !sr.red)) {
3010     xpl.red = true;
3011     x = xp;
3012     }
3013     else {
3014     if (sl == null || !sl.red) {
3015     if (sr != null)
3016     sr.red = false;
3017     xpl.red = true;
3018     root = rotateLeft(root, xpl);
3019     xpl = (xp = x.parent) == null ?
3020     null : xp.left;
3021     }
3022     if (xpl != null) {
3023     xpl.red = (xp == null) ? false : xp.red;
3024     if ((sl = xpl.left) != null)
3025     sl.red = false;
3026     }
3027     if (xp != null) {
3028     xp.red = false;
3029     root = rotateRight(root, xp);
3030     }
3031     x = root;
3032     }
3033     }
3034     }
3035     }
3036     }
3037 dl 1.79 /**
3038 dl 1.102 * Recursive invariant check
3039 dl 1.79 */
3040 dl 1.102 static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
3041     TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
3042     tb = t.prev, tn = (TreeNode<K,V>)t.next;
3043     if (tb != null && tb.next != t)
3044     return false;
3045     if (tn != null && tn.prev != t)
3046     return false;
3047     if (tp != null && t != tp.left && t != tp.right)
3048     return false;
3049     if (tl != null && (tl.parent != t || tl.hash > t.hash))
3050     return false;
3051     if (tr != null && (tr.parent != t || tr.hash < t.hash))
3052     return false;
3053     if (t.red && tl != null && tl.red && tr != null && tr.red)
3054     return false;
3055     if (tl != null && !checkInvariants(tl))
3056     return false;
3057     if (tr != null && !checkInvariants(tr))
3058     return false;
3059     return true;
3060 dl 1.79 }
3061    
3062 dl 1.102 private static final sun.misc.Unsafe U;
3063     private static final long LOCKSTATE;
3064     static {
3065     try {
3066     U = getUnsafe();
3067     Class<?> k = TreeBin.class;
3068     LOCKSTATE = U.objectFieldOffset
3069     (k.getDeclaredField("lockState"));
3070     } catch (Exception e) {
3071     throw new Error(e);
3072     }
3073     }
3074 dl 1.1 }
3075    
3076 dl 1.102 /* ----------------Table Traversal -------------- */
3077 dl 1.1
3078     /**
3079 dl 1.102 * Encapsulates traversal for methods such as containsValue; also
3080     * serves as a base class for other iterators and spliterators.
3081     *
3082     * Method advance visits once each still-valid node that was
3083     * reachable upon iterator construction. It might miss some that
3084     * were added to a bin after the bin was visited, which is OK with
3085     * respect to consistency guarantees. Maintaining this property in the face
3086     * of possible ongoing resizes requires a fair amount of
3087     * bookkeeping state that is difficult to optimize away amidst
3088     * volatile accesses. Even so, traversal maintains reasonable
3089     * throughput.
3090 dl 1.1 *
3091 dl 1.102 * Normally, iteration proceeds bin-by-bin traversing lists.
3092     * However, if the table has been resized, then all future steps
3093     * must traverse both the bin at the current index and the bin at
3094     * (index + baseSize); and so on for further resizings. To
3095     * paranoically cope with potential sharing by users of iterators
3096     * across threads, iteration terminates if a bounds check fails
3097     * for a table read.
3098 dl 1.1 */
3099 dl 1.102 static class Traverser<K,V> {
3100     Node<K,V>[] tab; // current table; updated if resized
3101     Node<K,V> next; // the next entry to use
3102     int index; // index of bin to use next
3103     int baseIndex; // current index of initial table
3104     int baseLimit; // index bound for initial table
3105     final int baseSize; // initial table size
3106    
3107     Traverser(Node<K,V>[] tab, int size, int index, int limit) {
3108     this.tab = tab;
3109     this.baseSize = size;
3110     this.baseIndex = this.index = index;
3111     this.baseLimit = limit;
3112     this.next = null;
3113     }
3114 dl 1.1
3115 dl 1.102 /**
3116     * Advances if possible, returning next valid node, or null if none.
3117     */
3118     final Node<K,V> advance() {
3119     Node<K,V> e;
3120     if ((e = next) != null)
3121     e = e.next;
3122     for (;;) {
3123     Node<K,V>[] t; int i, n; K ek; // must use locals in checks
3124     if (e != null)
3125     return next = e;
3126     if (baseIndex >= baseLimit || (t = tab) == null ||
3127     (n = t.length) <= (i = index) || i < 0)
3128     return next = null;
3129     if ((e = tabAt(t, index)) != null && e.hash < 0) {
3130     if (e instanceof ForwardingNode) {
3131     tab = ((ForwardingNode<K,V>)e).nextTable;
3132     e = null;
3133     continue;
3134 dl 1.75 }
3135 dl 1.102 else if (e instanceof TreeBin)
3136     e = ((TreeBin<K,V>)e).first;
3137     else
3138     e = null;
3139 dl 1.75 }
3140 dl 1.102 if ((index += baseSize) >= n)
3141     index = ++baseIndex; // visit upper slots if present
3142 dl 1.24 }
3143     }
3144 dl 1.75 }
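
    // Editorial sketch (not part of the original source file): a minimal
    // read-only scan written against Traverser, in the style of the
    // containsValue usage the class comment above describes. It relies only
    // on members shown in this listing (the Traverser constructor, advance(),
    // and Node.val); the method name and parameters are assumptions made for
    // this illustration, not actual members of the class.
    private static <K,V> boolean traverserScanSketch(Node<K,V>[] tab, Object target) {
        if (tab != null && target != null) {
            Traverser<K,V> it = new Traverser<K,V>(tab, tab.length, 0, tab.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V v = p.val;                           // node values are never null
                if (v == target || v.equals(target))
                    return true;                       // stop at the first match
            }
        }
        return false;
    }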
3145    
3146     /**
3147 dl 1.102 * Base of key, value, and entry Iterators. Adds fields to
3148     * Traverser to support iterator.remove
3149 dl 1.75 */
3150 dl 1.102 static class BaseIterator<K,V> extends Traverser<K,V> {
3151     final ConcurrentHashMapV8<K,V> map;
3152     Node<K,V> lastReturned;
3153     BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
3154     ConcurrentHashMapV8<K,V> map) {
3155     super(tab, size, index, limit);
3156     this.map = map;
3157     advance();
3158     }
3159    
3160     public final boolean hasNext() { return next != null; }
3161     public final boolean hasMoreElements() { return next != null; }
3162    
3163     public final void remove() {
3164     Node<K,V> p;
3165     if ((p = lastReturned) == null)
3166     throw new IllegalStateException();
3167     lastReturned = null;
3168     map.replaceNode(p.key, null, null);
3169     }
3170 dl 1.75 }
3171    
3172 dl 1.102 static final class KeyIterator<K,V> extends BaseIterator<K,V>
3173     implements Iterator<K>, Enumeration<K> {
3174     KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
3175     ConcurrentHashMapV8<K,V> map) {
3176     super(tab, index, size, limit, map);
3177     }
3178    
3179     public final K next() {
3180     Node<K,V> p;
3181     if ((p = next) == null)
3182     throw new NoSuchElementException();
3183     K k = p.key;
3184     lastReturned = p;
3185     advance();
3186     return k;
3187     }
3188    
3189     public final K nextElement() { return next(); }
3190 dl 1.75 }
3191    
3192 dl 1.102 static final class ValueIterator<K,V> extends BaseIterator<K,V>
3193     implements Iterator<V>, Enumeration<V> {
3194     ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
3195     ConcurrentHashMapV8<K,V> map) {
3196     super(tab, index, size, limit, map);
3197     }
3198    
3199     public final V next() {
3200     Node<K,V> p;
3201     if ((p = next) == null)
3202     throw new NoSuchElementException();
3203     V v = p.val;
3204     lastReturned = p;
3205     advance();
3206     return v;
3207 dl 1.84 }
3208 dl 1.102
3209     public final V nextElement() { return next(); }
3210 dl 1.75 }
3211    
3212 dl 1.102 static final class EntryIterator<K,V> extends BaseIterator<K,V>
3213     implements Iterator<Map.Entry<K,V>> {
3214     EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
3215     ConcurrentHashMapV8<K,V> map) {
3216     super(tab, index, size, limit, map);
3217     }
3218    
3219     public final Map.Entry<K,V> next() {
3220     Node<K,V> p;
3221     if ((p = next) == null)
3222     throw new NoSuchElementException();
3223     K k = p.key;
3224     V v = p.val;
3225     lastReturned = p;
3226     advance();
3227     return new MapEntry<K,V>(k, v, map);
3228 dl 1.84 }
3229 dl 1.75 }
3230    
3231     /**
3232 dl 1.102 * Exported Entry for EntryIterator
3233 dl 1.75 */
3234 dl 1.102 static final class MapEntry<K,V> implements Map.Entry<K,V> {
3235     final K key; // non-null
3236     V val; // non-null
3237     final ConcurrentHashMapV8<K,V> map;
3238     MapEntry(K key, V val, ConcurrentHashMapV8<K,V> map) {
3239     this.key = key;
3240     this.val = val;
3241     this.map = map;
3242     }
3243     public K getKey() { return key; }
3244     public V getValue() { return val; }
3245     public int hashCode() { return key.hashCode() ^ val.hashCode(); }
3246     public String toString() { return key + "=" + val; }
3247    
3248     public boolean equals(Object o) {
3249     Object k, v; Map.Entry<?,?> e;
3250     return ((o instanceof Map.Entry) &&
3251     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3252     (v = e.getValue()) != null &&
3253     (k == key || k.equals(key)) &&
3254     (v == val || v.equals(val)));
3255     }
3256    
3257     /**
3258     * Sets our entry's value and writes through to the map. The
3259     * value to return is somewhat arbitrary here. Since we do not
3260     * necessarily track asynchronous changes, the most recent
3261     * "previous" value could be different from what we return (or
3262     * could even have been removed, in which case the put will
3263     * re-establish it). We do not and cannot guarantee more.
3264     */
3265     public V setValue(V value) {
3266     if (value == null) throw new NullPointerException();
3267     V v = val;
3268     val = value;
3269     map.put(key, value);
3270     return v;
3271 dl 1.84 }
3272 dl 1.75 }
3273    
3274 dl 1.102 static final class KeySpliterator<K,V> extends Traverser<K,V>
3275     implements ConcurrentHashMapSpliterator<K> {
3276     long est; // size estimate
3277     KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3278     long est) {
3279     super(tab, size, index, limit);
3280     this.est = est;
3281     }
3282    
3283     public ConcurrentHashMapSpliterator<K> trySplit() {
3284     int i, f, h;
3285     return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3286     new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
3287     f, est >>>= 1);
3288     }
3289    
3290     public void forEachRemaining(Action<? super K> action) {
3291     if (action == null) throw new NullPointerException();
3292     for (Node<K,V> p; (p = advance()) != null;)
3293     action.apply(p.key);
3294     }
3295    
3296     public boolean tryAdvance(Action<? super K> action) {
3297     if (action == null) throw new NullPointerException();
3298     Node<K,V> p;
3299     if ((p = advance()) == null)
3300     return false;
3301     action.apply(p.key);
3302     return true;
3303 dl 1.84 }
3304 dl 1.102
3305     public long estimateSize() { return est; }
3306    
3307 dl 1.75 }
3308    
3309 dl 1.102 static final class ValueSpliterator<K,V> extends Traverser<K,V>
3310     implements ConcurrentHashMapSpliterator<V> {
3311     long est; // size estimate
3312     ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
3313     long est) {
3314     super(tab, size, index, limit);
3315     this.est = est;
3316     }
3317    
3318     public ConcurrentHashMapSpliterator<V> trySplit() {
3319     int i, f, h;
3320     return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3321     new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
3322     f, est >>>= 1);
3323     }
3324    
3325     public void forEachRemaining(Action<? super V> action) {
3326     if (action == null) throw new NullPointerException();
3327     for (Node<K,V> p; (p = advance()) != null;)
3328     action.apply(p.val);
3329     }
3330    
3331     public boolean tryAdvance(Action<? super V> action) {
3332     if (action == null) throw new NullPointerException();
3333     Node<K,V> p;
3334     if ((p = advance()) == null)
3335     return false;
3336     action.apply(p.val);
3337     return true;
3338     }
3339    
3340     public long estimateSize() { return est; }
3341    
3342 dl 1.75 }
3343    
3344 dl 1.102 static final class EntrySpliterator<K,V> extends Traverser<K,V>
3345     implements ConcurrentHashMapSpliterator<Map.Entry<K,V>> {
3346     final ConcurrentHashMapV8<K,V> map; // To export MapEntry
3347     long est; // size estimate
3348     EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3349     long est, ConcurrentHashMapV8<K,V> map) {
3350     super(tab, size, index, limit);
3351     this.map = map;
3352     this.est = est;
3353     }
3354    
3355     public ConcurrentHashMapSpliterator<Map.Entry<K,V>> trySplit() {
3356     int i, f, h;
3357     return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3358     new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
3359     f, est >>>= 1, map);
3360     }
3361    
3362     public void forEachRemaining(Action<? super Map.Entry<K,V>> action) {
3363     if (action == null) throw new NullPointerException();
3364     for (Node<K,V> p; (p = advance()) != null; )
3365     action.apply(new MapEntry<K,V>(p.key, p.val, map));
3366     }
3367    
3368     public boolean tryAdvance(Action<? super Map.Entry<K,V>> action) {
3369     if (action == null) throw new NullPointerException();
3370     Node<K,V> p;
3371     if ((p = advance()) == null)
3372     return false;
3373     action.apply(new MapEntry<K,V>(p.key, p.val, map));
3374     return true;
3375     }
3376    
3377     public long estimateSize() { return est; }
3378    
3379 dl 1.75 }
3380    
3381 dl 1.102 // Parallel bulk operations
3382    
3383 dl 1.75 /**
3384 dl 1.102 * Computes initial batch value for bulk tasks. The returned value
3385     * is approximately exp2 of the number of times (minus one) to
3386     * split the task in two before executing the leaf action. This value is
3387     * faster to compute and more convenient to use as a guide to
3388     * splitting than is the depth, since it is used while dividing by
3389     * two anyway.
3390     */
3391     final int batchFor(long b) {
3392     long n;
3393     if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3394     return 0;
3395     int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3396     return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3397 dl 1.75 }
3398    
3399     /**
3400 dl 1.84 * Performs the given action for each (key, value).
3401     *
3402 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3403     * needed for this operation to be executed in parallel
3404 dl 1.84 * @param action the action
3405 dl 1.102 * @since 1.8
3406 dl 1.75 */
3407 dl 1.102 public void forEach(long parallelismThreshold,
3408     BiAction<? super K,? super V> action) {
3409     if (action == null) throw new NullPointerException();
3410     new ForEachMappingTask<K,V>
3411     (null, batchFor(parallelismThreshold), 0, 0, table,
3412     action).invoke();
3413 dl 1.84 }
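
    // Editorial usage sketch (not part of the original source file): a
    // plausible call to forEach(long, BiAction). The map, element types, and
    // the threshold of 100 are illustrative assumptions; the callback method
    // name apply is assumed, consistent with the Action usage in this file.
    private static void forEachUsageSketch(ConcurrentHashMapV8<String,Long> m) {
        m.forEach(100L, new BiAction<String,Long>() {
            public void apply(String key, Long value) {
                // invoked once per (key, value), possibly in parallel
                System.out.println(key + " -> " + value);
            }
        });
    }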
3414 dl 1.75
3415 dl 1.84 /**
3416     * Performs the given action for each non-null transformation
3417     * of each (key, value).
3418     *
3419 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3420     * needed for this operation to be executed in parallel
3421 dl 1.84 * @param transformer a function returning the transformation
3422 jsr166 1.91 * for an element, or null if there is no transformation (in
3423 jsr166 1.93 * which case the action is not applied)
3424 dl 1.84 * @param action the action
3425 dl 1.102 * @since 1.8
3426 dl 1.84 */
3427 dl 1.102 public <U> void forEach(long parallelismThreshold,
3428     BiFun<? super K, ? super V, ? extends U> transformer,
3429     Action<? super U> action) {
3430     if (transformer == null || action == null)
3431     throw new NullPointerException();
3432     new ForEachTransformedMappingTask<K,V,U>
3433     (null, batchFor(parallelismThreshold), 0, 0, table,
3434     transformer, action).invoke();
3435 dl 1.84 }
3436 dl 1.75
3437 dl 1.84 /**
3438     * Returns a non-null result from applying the given search
3439     * function on each (key, value), or null if none. Upon
3440     * success, further element processing is suppressed and the
3441     * results of any other parallel invocations of the search
3442     * function are ignored.
3443     *
3444 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3445     * needed for this operation to be executed in parallel
3446 dl 1.84 * @param searchFunction a function returning a non-null
3447     * result on success, else null
3448     * @return a non-null result from applying the given search
3449     * function on each (key, value), or null if none
3450 dl 1.102 * @since 1.8
3451 dl 1.84 */
3452 dl 1.102 public <U> U search(long parallelismThreshold,
3453     BiFun<? super K, ? super V, ? extends U> searchFunction) {
3454     if (searchFunction == null) throw new NullPointerException();
3455     return new SearchMappingsTask<K,V,U>
3456     (null, batchFor(parallelismThreshold), 0, 0, table,
3457     searchFunction, new AtomicReference<U>()).invoke();
3458 dl 1.84 }
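
    // Editorial usage sketch (not part of the original source file): a
    // plausible call to search(long, BiFun). Returning null keeps searching;
    // a non-null result suppresses further processing. Names, the threshold,
    // and the assumed apply callback are illustrative assumptions.
    private static String searchUsageSketch(ConcurrentHashMapV8<String,Long> m) {
        return m.search(100L, new BiFun<String,Long,String>() {
            public String apply(String key, Long value) {
                return (value.longValue() > 1000L) ? key : null; // null = no match here
            }
        });
    }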
3459 dl 1.75
3460 dl 1.84 /**
3461     * Returns the result of accumulating the given transformation
3462     * of all (key, value) pairs using the given reducer to
3463     * combine values, or null if none.
3464     *
3465 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3466     * needed for this operation to be executed in parallel
3467 dl 1.84 * @param transformer a function returning the transformation
3468 jsr166 1.91 * for an element, or null if there is no transformation (in
3469 jsr166 1.93 * which case it is not combined)
3470 dl 1.84 * @param reducer a commutative associative combining function
3471     * @return the result of accumulating the given transformation
3472     * of all (key, value) pairs
3473 dl 1.102 * @since 1.8
3474 dl 1.84 */
3475 dl 1.102 public <U> U reduce(long parallelismThreshold,
3476     BiFun<? super K, ? super V, ? extends U> transformer,
3477     BiFun<? super U, ? super U, ? extends U> reducer) {
3478     if (transformer == null || reducer == null)
3479     throw new NullPointerException();
3480     return new MapReduceMappingsTask<K,V,U>
3481     (null, batchFor(parallelismThreshold), 0, 0, table,
3482     null, transformer, reducer).invoke();
3483 dl 1.84 }
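
    // Editorial usage sketch (not part of the original source file): a
    // plausible reduce(long, BiFun, BiFun) call mapping each (key, value) to
    // the key length and combining with max, which is commutative and
    // associative as the javadoc requires. Names and threshold are
    // illustrative assumptions; apply is the assumed callback name.
    private static Integer reduceUsageSketch(ConcurrentHashMapV8<String,Long> m) {
        return m.reduce(100L,
            new BiFun<String,Long,Integer>() {
                public Integer apply(String key, Long value) {
                    return Integer.valueOf(key.length());     // transformation
                }
            },
            new BiFun<Integer,Integer,Integer>() {
                public Integer apply(Integer a, Integer b) {
                    return Integer.valueOf(Math.max(a.intValue(), b.intValue()));
                }
            });                                               // null if the map is empty
    }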
3484 dl 1.75
3485 dl 1.84 /**
3486     * Returns the result of accumulating the given transformation
3487     * of all (key, value) pairs using the given reducer to
3488     * combine values, and the given basis as an identity value.
3489     *
3490 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3491     * needed for this operation to be executed in parallel
3492 dl 1.84 * @param transformer a function returning the transformation
3493     * for an element
3494     * @param basis the identity (initial default value) for the reduction
3495     * @param reducer a commutative associative combining function
3496     * @return the result of accumulating the given transformation
3497     * of all (key, value) pairs
3498 dl 1.102 * @since 1.8
3499 dl 1.84 */
3500 dl 1.102 public double reduceToDoubleIn(long parallelismThreshold,
3501     ObjectByObjectToDouble<? super K, ? super V> transformer,
3502     double basis,
3503     DoubleByDoubleToDouble reducer) {
3504     if (transformer == null || reducer == null)
3505     throw new NullPointerException();
3506     return new MapReduceMappingsToDoubleTask<K,V>
3507     (null, batchFor(parallelismThreshold), 0, 0, table,
3508     null, transformer, basis, reducer).invoke();
3509 dl 1.84 }
3510    
3511     /**
3512     * Returns the result of accumulating the given transformation
3513     * of all (key, value) pairs using the given reducer to
3514     * combine values, and the given basis as an identity value.
3515     *
3516 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3517     * needed for this operation to be executed in parallel
3518 dl 1.84 * @param transformer a function returning the transformation
3519     * for an element
3520     * @param basis the identity (initial default value) for the reduction
3521     * @param reducer a commutative associative combining function
3522     * @return the result of accumulating the given transformation
3523     * of all (key, value) pairs
3524 dl 1.102 * @since 1.8
3525 dl 1.84 */
3526 dl 1.102 public long reduceToLong(long parallelismThreshold,
3527     ObjectByObjectToLong<? super K, ? super V> transformer,
3528     long basis,
3529     LongByLongToLong reducer) {
3530     if (transformer == null || reducer == null)
3531     throw new NullPointerException();
3532     return new MapReduceMappingsToLongTask<K,V>
3533     (null, batchFor(parallelismThreshold), 0, 0, table,
3534     null, transformer, basis, reducer).invoke();
3535 dl 1.84 }
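
    // Editorial usage sketch (not part of the original source file): sums all
    // values with reduceToLong, using 0 as the identity basis for addition.
    // Names, threshold, and the assumed apply callback are illustrative
    // assumptions.
    private static long reduceToLongUsageSketch(ConcurrentHashMapV8<String,Long> m) {
        return m.reduceToLong(100L,
            new ObjectByObjectToLong<String,Long>() {
                public long apply(String key, Long value) {
                    return value.longValue();                 // per-element transformation
                }
            },
            0L,                                               // identity for addition
            new LongByLongToLong() {
                public long apply(long a, long b) { return a + b; }
            });
    }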
3536    
3537     /**
3538     * Returns the result of accumulating the given transformation
3539     * of all (key, value) pairs using the given reducer to
3540     * combine values, and the given basis as an identity value.
3541     *
3542 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3543     * needed for this operation to be executed in parallel
3544 dl 1.84 * @param transformer a function returning the transformation
3545     * for an element
3546     * @param basis the identity (initial default value) for the reduction
3547     * @param reducer a commutative associative combining function
3548     * @return the result of accumulating the given transformation
3549     * of all (key, value) pairs
3550 dl 1.102 * @since 1.8
3551 dl 1.84 */
3552 dl 1.102 public int reduceToInt(long parallelismThreshold,
3553     ObjectByObjectToInt<? super K, ? super V> transformer,
3554     int basis,
3555     IntByIntToInt reducer) {
3556     if (transformer == null || reducer == null)
3557     throw new NullPointerException();
3558     return new MapReduceMappingsToIntTask<K,V>
3559     (null, batchFor(parallelismThreshold), 0, 0, table,
3560     null, transformer, basis, reducer).invoke();
3561 dl 1.84 }
3562    
3563     /**
3564     * Performs the given action for each key.
3565     *
3566 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3567     * needed for this operation to be executed in parallel
3568 dl 1.84 * @param action the action
3569 dl 1.102 * @since 1.8
3570 dl 1.84 */
3571 dl 1.102 public void forEachKey(long parallelismThreshold,
3572     Action<? super K> action) {
3573     if (action == null) throw new NullPointerException();
3574     new ForEachKeyTask<K,V>
3575     (null, batchFor(parallelismThreshold), 0, 0, table,
3576     action).invoke();
3577 dl 1.84 }
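
    // Editorial usage sketch (not part of the original source file): a
    // plausible call to forEachKey(long, Action), mirroring the Action.apply
    // usage visible elsewhere in this listing. Names and threshold are
    // illustrative assumptions.
    private static void forEachKeyUsageSketch(ConcurrentHashMapV8<String,Long> m) {
        m.forEachKey(100L, new Action<String>() {
            public void apply(String key) {
                System.out.println("key: " + key);            // one call per key
            }
        });
    }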
3578    
3579     /**
3580     * Performs the given action for each non-null transformation
3581     * of each key.
3582     *
3583 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3584     * needed for this operation to be executed in parallel
3585 dl 1.84 * @param transformer a function returning the transformation
3586 jsr166 1.91 * for an element, or null if there is no transformation (in
3587 jsr166 1.93 * which case the action is not applied)
3588 dl 1.84 * @param action the action
3589 dl 1.102 * @since 1.8
3590 dl 1.84 */
3591 dl 1.102 public <U> void forEachKey(long parallelismThreshold,
3592     Fun<? super K, ? extends U> transformer,
3593     Action<? super U> action) {
3594     if (transformer == null || action == null)
3595     throw new NullPointerException();
3596     new ForEachTransformedKeyTask<K,V,U>
3597     (null, batchFor(parallelismThreshold), 0, 0, table,
3598     transformer, action).invoke();
3599 dl 1.84 }
3600    
3601     /**
3602     * Returns a non-null result from applying the given search
3603     * function on each key, or null if none. Upon success,
3604     * further element processing is suppressed and the results of
3605     * any other parallel invocations of the search function are
3606     * ignored.
3607     *
3608 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3609     * needed for this operation to be executed in parallel
3610 dl 1.84 * @param searchFunction a function returning a non-null
3611     * result on success, else null
3612     * @return a non-null result from applying the given search
3613     * function on each key, or null if none
3614 dl 1.102 * @since 1.8
3615 dl 1.84 */
3616 dl 1.102 public <U> U searchKeys(long parallelismThreshold,
3617     Fun<? super K, ? extends U> searchFunction) {
3618     if (searchFunction == null) throw new NullPointerException();
3619     return new SearchKeysTask<K,V,U>
3620     (null, batchFor(parallelismThreshold), 0, 0, table,
3621     searchFunction, new AtomicReference<U>()).invoke();
3622 dl 1.84 }
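
    // Editorial usage sketch (not part of the original source file): finds
    // any key with a given prefix via searchKeys; returning null means "keep
    // looking". The prefix, names, and assumed apply callback are
    // illustrative assumptions.
    private static String searchKeysUsageSketch(ConcurrentHashMapV8<String,Long> m) {
        return m.searchKeys(100L, new Fun<String,String>() {
            public String apply(String key) {
                return key.startsWith("user:") ? key : null;
            }
        });
    }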
3623    
3624     /**
3625     * Returns the result of accumulating all keys using the given
3626     * reducer to combine values, or null if none.
3627     *
3628 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3629     * needed for this operation to be executed in parallel
3630 dl 1.84 * @param reducer a commutative associative combining function
3631     * @return the result of accumulating all keys using the given
3632     * reducer to combine values, or null if none
3633 dl 1.102 * @since 1.8
3634 dl 1.84 */
3635 dl 1.102 public K reduceKeys(long parallelismThreshold,
3636     BiFun<? super K, ? super K, ? extends K> reducer) {
3637     if (reducer == null) throw new NullPointerException();
3638     return new ReduceKeysTask<K,V>
3639     (null, batchFor(parallelismThreshold), 0, 0, table,
3640     null, reducer).invoke();
3641 dl 1.84 }
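
    // Editorial usage sketch (not part of the original source file): reduces
    // all keys to the lexicographically smallest one; the reducer below is
    // commutative and associative as required. Names and the assumed apply
    // callback are illustrative assumptions.
    private static String reduceKeysUsageSketch(ConcurrentHashMapV8<String,Long> m) {
        return m.reduceKeys(100L, new BiFun<String,String,String>() {
            public String apply(String a, String b) {
                return (a.compareTo(b) <= 0) ? a : b;         // keep the smaller key
            }
        });                                                   // null if the map is empty
    }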
3642    
3643     /**
3644     * Returns the result of accumulating the given transformation
3645     * of all keys using the given reducer to combine values, or
3646     * null if none.
3647     *
3648 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3649     * needed for this operation to be executed in parallel
3650 dl 1.84 * @param transformer a function returning the transformation
3651 jsr166 1.91 * for an element, or null if there is no transformation (in
3652 jsr166 1.93 * which case it is not combined)
3653 dl 1.84 * @param reducer a commutative associative combining function
3654     * @return the result of accumulating the given transformation
3655     * of all keys
3656 dl 1.102 * @since 1.8
3657 dl 1.84 */
3658 dl 1.102 public <U> U reduceKeys(long parallelismThreshold,
3659     Fun<? super K, ? extends U> transformer,
3660 dl 1.84 BiFun<? super U, ? super U, ? extends U> reducer) {
3661 dl 1.102 if (transformer == null || reducer == null)
3662     throw new NullPointerException();
3663     return new MapReduceKeysTask<K,V,U>
3664     (null, batchFor(parallelismThreshold), 0, 0, table,
3665     null, transformer, reducer).invoke();
3666 dl 1.84 }
3667    
3668     /**
3669     * Returns the result of accumulating the given transformation
3670     * of all keys using the given reducer to combine values, and
3671     * the given basis as an identity value.
3672     *
3673 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3674     * needed for this operation to be executed in parallel
3675 dl 1.84 * @param transformer a function returning the transformation
3676     * for an element
3677     * @param basis the identity (initial default value) for the reduction
3678     * @param reducer a commutative associative combining function
3679 dl 1.102 * @return the result of accumulating the given transformation
3680 dl 1.84 * of all keys
3681 dl 1.102 * @since 1.8
3682 dl 1.84 */
3683 dl 1.102 public double reduceKeysToDouble(long parallelismThreshold,
3684     ObjectToDouble<? super K> transformer,
3685     double basis,
3686     DoubleByDoubleToDouble reducer) {
3687     if (transformer == null || reducer == null)
3688     throw new NullPointerException();
3689     return new MapReduceKeysToDoubleTask<K,V>
3690     (null, batchFor(parallelismThreshold), 0, 0, table,
3691     null, transformer, basis, reducer).invoke();
3692 dl 1.84 }
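
    // Editorial usage sketch (not part of the original source file): sums key
    // lengths as doubles via reduceKeysToDouble with basis 0.0. Names,
    // threshold, and the assumed apply callback are illustrative assumptions.
    private static double reduceKeysToDoubleUsageSketch(ConcurrentHashMapV8<String,Long> m) {
        return m.reduceKeysToDouble(100L,
            new ObjectToDouble<String>() {
                public double apply(String key) { return key.length(); }
            },
            0.0,                                              // identity for addition
            new DoubleByDoubleToDouble() {
                public double apply(double a, double b) { return a + b; }
            });
    }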
3693    
3694     /**
3695     * Returns the result of accumulating the given transformation
3696     * of all keys using the given reducer to combine values, and
3697     * the given basis as an identity value.
3698     *
3699 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3700     * needed for this operation to be executed in parallel
3701 dl 1.84 * @param transformer a function returning the transformation
3702     * for an element
3703     * @param basis the identity (initial default value) for the reduction
3704     * @param reducer a commutative associative combining function
3705     * @return the result of accumulating the given transformation
3706     * of all keys
3707 dl 1.102 * @since 1.8
3708 dl 1.84 */
3709 dl 1.102 public long reduceKeysToLong(long parallelismThreshold,
3710     ObjectToLong<? super K> transformer,
3711     long basis,
3712     LongByLongToLong reducer) {
3713     if (transformer == null || reducer == null)
3714     throw new NullPointerException();
3715     return new MapReduceKeysToLongTask<K,V>
3716     (null, batchFor(parallelismThreshold), 0, 0, table,
3717     null, transformer, basis, reducer).invoke();
3718 dl 1.84 }
3719    
3720     /**
3721     * Returns the result of accumulating the given transformation
3722     * of all keys using the given reducer to combine values, and
3723     * the given basis as an identity value.
3724     *
3725 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3726     * needed for this operation to be executed in parallel
3727 dl 1.84 * @param transformer a function returning the transformation
3728     * for an element
3729     * @param basis the identity (initial default value) for the reduction
3730     * @param reducer a commutative associative combining function
3731     * @return the result of accumulating the given transformation
3732     * of all keys
3733 dl 1.102 * @since 1.8
3734 dl 1.84 */
3735 dl 1.102 public int reduceKeysToInt(long parallelismThreshold,
3736     ObjectToInt<? super K> transformer,
3737     int basis,
3738     IntByIntToInt reducer) {
3739     if (transformer == null || reducer == null)
3740     throw new NullPointerException();
3741     return new MapReduceKeysToIntTask<K,V>
3742     (null, batchFor(parallelismThreshold), 0, 0, table,
3743     null, transformer, basis, reducer).invoke();
3744 dl 1.84 }
3745    
3746     /**
3747     * Performs the given action for each value.
3748     *
3749 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3750     * needed for this operation to be executed in parallel
3751 dl 1.84 * @param action the action
3752 dl 1.102 * @since 1.8
3753 dl 1.84 */
3754 dl 1.102 public void forEachValue(long parallelismThreshold,
3755     Action<? super V> action) {
3756     if (action == null)
3757     throw new NullPointerException();
3758     new ForEachValueTask<K,V>
3759     (null, batchFor(parallelismThreshold), 0, 0, table,
3760     action).invoke();
3761 dl 1.84 }
3762    
3763     /**
3764     * Performs the given action for each non-null transformation
3765     * of each value.
3766     *
3767 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3768     * needed for this operation to be executed in parallel
3769 dl 1.84 * @param transformer a function returning the transformation
3770 jsr166 1.91 * for an element, or null if there is no transformation (in
3771 jsr166 1.93 * which case the action is not applied)
3772 dl 1.102 * @param action the action
3773     * @since 1.8
3774 dl 1.84 */
3775 dl 1.102 public <U> void forEachValue(long parallelismThreshold,
3776     Fun<? super V, ? extends U> transformer,
3777     Action<? super U> action) {
3778     if (transformer == null || action == null)
3779     throw new NullPointerException();
3780     new ForEachTransformedValueTask<K,V,U>
3781     (null, batchFor(parallelismThreshold), 0, 0, table,
3782     transformer, action).invoke();
3783 dl 1.84 }
3784    
3785     /**
3786     * Returns a non-null result from applying the given search
3787     * function on each value, or null if none. Upon success,
3788     * further element processing is suppressed and the results of
3789     * any other parallel invocations of the search function are
3790     * ignored.
3791     *
3792 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3793     * needed for this operation to be executed in parallel
3794 dl 1.84 * @param searchFunction a function returning a non-null
3795     * result on success, else null
3796     * @return a non-null result from applying the given search
3797     * function on each value, or null if none
3798 dl 1.102 * @since 1.8
3799 dl 1.84 */
3800 dl 1.102 public <U> U searchValues(long parallelismThreshold,
3801     Fun<? super V, ? extends U> searchFunction) {
3802     if (searchFunction == null) throw new NullPointerException();
3803     return new SearchValuesTask<K,V,U>
3804     (null, batchFor(parallelismThreshold), 0, 0, table,
3805     searchFunction, new AtomicReference<U>()).invoke();
3806 dl 1.84 }
3807    
3808     /**
3809     * Returns the result of accumulating all values using the
3810     * given reducer to combine values, or null if none.
3811     *
3812 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3813     * needed for this operation to be executed in parallel
3814 dl 1.84 * @param reducer a commutative associative combining function
3815 dl 1.102 * @return the result of accumulating all values
3816     * @since 1.8
3817 dl 1.84 */
3818 dl 1.102 public V reduceValues(long parallelismThreshold,
3819     BiFun<? super V, ? super V, ? extends V> reducer) {
3820     if (reducer == null) throw new NullPointerException();
3821     return new ReduceValuesTask<K,V>
3822     (null, batchFor(parallelismThreshold), 0, 0, table,
3823     null, reducer).invoke();
3824 dl 1.84 }
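
    // Editorial usage sketch (not part of the original source file): reduces
    // the values to their maximum with reduceValues; the result is null for
    // an empty map. Names and the assumed apply callback are illustrative
    // assumptions.
    private static Long reduceValuesUsageSketch(ConcurrentHashMapV8<String,Long> m) {
        return m.reduceValues(100L, new BiFun<Long,Long,Long>() {
            public Long apply(Long a, Long b) {
                return (a.longValue() >= b.longValue()) ? a : b;
            }
        });
    }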
3825    
3826     /**
3827     * Returns the result of accumulating the given transformation
3828     * of all values using the given reducer to combine values, or
3829     * null if none.
3830     *
3831 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3832     * needed for this operation to be executed in parallel
3833 dl 1.84 * @param transformer a function returning the transformation
3834 jsr166 1.91 * for an element, or null if there is no transformation (in
3835 jsr166 1.93 * which case it is not combined)
3836 dl 1.84 * @param reducer a commutative associative combining function
3837     * @return the result of accumulating the given transformation
3838     * of all values
3839 dl 1.102 * @since 1.8
3840 dl 1.84 */
3841 dl 1.102 public <U> U reduceValues(long parallelismThreshold,
3842     Fun<? super V, ? extends U> transformer,
3843     BiFun<? super U, ? super U, ? extends U> reducer) {
3844     if (transformer == null || reducer == null)
3845     throw new NullPointerException();
3846     return new MapReduceValuesTask<K,V,U>
3847     (null, batchFor(parallelismThreshold), 0, 0, table,
3848     null, transformer, reducer).invoke();
3849 dl 1.84 }
3850    
3851     /**
3852     * Returns the result of accumulating the given transformation
3853     * of all values using the given reducer to combine values,
3854     * and the given basis as an identity value.
3855     *
3856 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3857     * needed for this operation to be executed in parallel
3858 dl 1.84 * @param transformer a function returning the transformation
3859     * for an element
3860     * @param basis the identity (initial default value) for the reduction
3861     * @param reducer a commutative associative combining function
3862     * @return the result of accumulating the given transformation
3863     * of all values
3864 dl 1.102 * @since 1.8
3865 dl 1.84 */
3866 dl 1.102 public double reduceValuesToDouble(long parallelismThreshold,
3867     ObjectToDouble<? super V> transformer,
3868     double basis,
3869     DoubleByDoubleToDouble reducer) {
3870     if (transformer == null || reducer == null)
3871     throw new NullPointerException();
3872     return new MapReduceValuesToDoubleTask<K,V>
3873     (null, batchFor(parallelismThreshold), 0, 0, table,
3874     null, transformer, basis, reducer).invoke();
3875 dl 1.84 }
3876    
3877     /**
3878     * Returns the result of accumulating the given transformation
3879     * of all values using the given reducer to combine values,
3880     * and the given basis as an identity value.
3881     *
3882 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3883     * needed for this operation to be executed in parallel
3884 dl 1.84 * @param transformer a function returning the transformation
3885     * for an element
3886     * @param basis the identity (initial default value) for the reduction
3887     * @param reducer a commutative associative combining function
3888     * @return the result of accumulating the given transformation
3889     * of all values
3890 dl 1.102 * @since 1.8
3891 dl 1.84 */
3892 dl 1.102 public long reduceValuesToLong(long parallelismThreshold,
3893     ObjectToLong<? super V> transformer,
3894     long basis,
3895     LongByLongToLong reducer) {
3896     if (transformer == null || reducer == null)
3897     throw new NullPointerException();
3898     return new MapReduceValuesToLongTask<K,V>
3899     (null, batchFor(parallelismThreshold), 0, 0, table,
3900     null, transformer, basis, reducer).invoke();
3901 dl 1.84 }
3902    
3903     /**
3904     * Returns the result of accumulating the given transformation
3905     * of all values using the given reducer to combine values,
3906     * and the given basis as an identity value.
3907     *
3908 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3909     * needed for this operation to be executed in parallel
3910 dl 1.84 * @param transformer a function returning the transformation
3911     * for an element
3912     * @param basis the identity (initial default value) for the reduction
3913     * @param reducer a commutative associative combining function
3914     * @return the result of accumulating the given transformation
3915     * of all values
3916 dl 1.102 * @since 1.8
3917 dl 1.84 */
3918 dl 1.102 public int reduceValuesToInt(long parallelismThreshold,
3919     ObjectToInt<? super V> transformer,
3920     int basis,
3921     IntByIntToInt reducer) {
3922     if (transformer == null || reducer == null)
3923     throw new NullPointerException();
3924     return new MapReduceValuesToIntTask<K,V>
3925     (null, batchFor(parallelismThreshold), 0, 0, table,
3926     null, transformer, basis, reducer).invoke();
3927 dl 1.84 }
3928    
3929     /**
3930     * Performs the given action for each entry.
3931     *
3932 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3933     * needed for this operation to be executed in parallel
3934 dl 1.84 * @param action the action
3935 dl 1.102 * @since 1.8
3936 dl 1.84 */
3937 dl 1.102 public void forEachEntry(long parallelismThreshold,
3938     Action<? super Map.Entry<K,V>> action) {
3939     if (action == null) throw new NullPointerException();
3940     new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
3941     action).invoke();
3942 dl 1.84 }
3943    
3944     /**
3945     * Performs the given action for each non-null transformation
3946     * of each entry.
3947     *
3948 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3949     * needed for this operation to be executed in parallel
3950 dl 1.84 * @param transformer a function returning the transformation
3951 jsr166 1.91 * for an element, or null if there is no transformation (in
3952 jsr166 1.93 * which case the action is not applied)
3953 dl 1.84 * @param action the action
3954 dl 1.102 * @since 1.8
3955 dl 1.84 */
3956 dl 1.102 public <U> void forEachEntry(long parallelismThreshold,
3957     Fun<Map.Entry<K,V>, ? extends U> transformer,
3958     Action<? super U> action) {
3959     if (transformer == null || action == null)
3960     throw new NullPointerException();
3961     new ForEachTransformedEntryTask<K,V,U>
3962     (null, batchFor(parallelismThreshold), 0, 0, table,
3963     transformer, action).invoke();
3964 dl 1.84 }
3965    
3966     /**
3967     * Returns a non-null result from applying the given search
3968     * function on each entry, or null if none. Upon success,
3969     * further element processing is suppressed and the results of
3970     * any other parallel invocations of the search function are
3971     * ignored.
3972     *
3973 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3974     * needed for this operation to be executed in parallel
3975 dl 1.84 * @param searchFunction a function returning a non-null
3976     * result on success, else null
3977     * @return a non-null result from applying the given search
3978     * function on each entry, or null if none
3979 dl 1.102 * @since 1.8
3980 dl 1.84 */
3981 dl 1.102 public <U> U searchEntries(long parallelismThreshold,
3982     Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
3983     if (searchFunction == null) throw new NullPointerException();
3984     return new SearchEntriesTask<K,V,U>
3985     (null, batchFor(parallelismThreshold), 0, 0, table,
3986     searchFunction, new AtomicReference<U>()).invoke();
3987 dl 1.84 }
3988    
3989     /**
3990     * Returns the result of accumulating all entries using the
3991     * given reducer to combine values, or null if none.
3992     *
3993 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3994     * needed for this operation to be executed in parallel
3995 dl 1.84 * @param reducer a commutative associative combining function
3996     * @return the result of accumulating all entries
3997 dl 1.102 * @since 1.8
3998 dl 1.84 */
3999 dl 1.102 public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4000     BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4001     if (reducer == null) throw new NullPointerException();
4002     return new ReduceEntriesTask<K,V>
4003     (null, batchFor(parallelismThreshold), 0, 0, table,
4004     null, reducer).invoke();
4005 dl 1.84 }
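
    // Editorial usage sketch (not part of the original source file): picks
    // the entry with the largest value using reduceEntries. Names, the
    // threshold, and the assumed apply callback are illustrative assumptions.
    private static Map.Entry<String,Long> reduceEntriesUsageSketch(
        ConcurrentHashMapV8<String,Long> m) {
        return m.reduceEntries(100L,
            new BiFun<Map.Entry<String,Long>, Map.Entry<String,Long>,
                      Map.Entry<String,Long>>() {
                public Map.Entry<String,Long> apply(Map.Entry<String,Long> a,
                                                    Map.Entry<String,Long> b) {
                    return (a.getValue().longValue() >= b.getValue().longValue()) ? a : b;
                }
            });                                               // null if the map is empty
    }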
4006    
4007     /**
4008     * Returns the result of accumulating the given transformation
4009     * of all entries using the given reducer to combine values,
4010     * or null if none.
4011     *
4012 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
4013     * needed for this operation to be executed in parallel
4014 dl 1.84 * @param transformer a function returning the transformation
4015 jsr166 1.91 * for an element, or null if there is no transformation (in
4016 jsr166 1.93 * which case it is not combined)
4017 dl 1.84 * @param reducer a commutative associative combining function
4018     * @return the result of accumulating the given transformation
4019     * of all entries
4020 dl 1.102 * @since 1.8
4021 dl 1.84 */
4022 dl 1.102 public <U> U reduceEntries(long parallelismThreshold,
4023     Fun<Map.Entry<K,V>, ? extends U> transformer,
4024     BiFun<? super U, ? super U, ? extends U> reducer) {
4025     if (transformer == null || reducer == null)
4026     throw new NullPointerException();
4027     return new MapReduceEntriesTask<K,V,U>
4028     (null, batchFor(parallelismThreshold), 0, 0, table,
4029     null, transformer, reducer).invoke();
4030 dl 1.84 }
4031    
4032     /**
4033     * Returns the result of accumulating the given transformation
4034     * of all entries using the given reducer to combine values,
4035     * and the given basis as an identity value.
4036     *
4037 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
4038     * needed for this operation to be executed in parallel
4039 dl 1.84 * @param transformer a function returning the transformation
4040     * for an element
4041     * @param basis the identity (initial default value) for the reduction
4042     * @param reducer a commutative associative combining function
4043     * @return the result of accumulating the given transformation
4044     * of all entries
4045 dl 1.102 * @since 1.8
4046 dl 1.84 */
4047 dl 1.102 public double reduceEntriesToDouble(long parallelismThreshold,
4048     ObjectToDouble<Map.Entry<K,V>> transformer,
4049     double basis,
4050     DoubleByDoubleToDouble reducer) {
4051     if (transformer == null || reducer == null)
4052     throw new NullPointerException();
4053     return new MapReduceEntriesToDoubleTask<K,V>
4054     (null, batchFor(parallelismThreshold), 0, 0, table,
4055     null, transformer, basis, reducer).invoke();
4056 dl 1.84 }
4057    
4058     /**
4059     * Returns the result of accumulating the given transformation
4060     * of all entries using the given reducer to combine values,
4061     * and the given basis as an identity value.
4062     *
4063 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
4064     * needed for this operation to be executed in parallel
4065 dl 1.84 * @param transformer a function returning the transformation
4066     * for an element
4067     * @param basis the identity (initial default value) for the reduction
4068     * @param reducer a commutative associative combining function
4069 dl 1.102 * @return the result of accumulating the given transformation
4070 dl 1.84 * of all entries
4071 dl 1.102 * @since 1.8
4072 dl 1.84 */
4073 dl 1.102 public long reduceEntriesToLong(long parallelismThreshold,
4074     ObjectToLong<Map.Entry<K,V>> transformer,
4075     long basis,
4076     LongByLongToLong reducer) {
4077     if (transformer == null || reducer == null)
4078     throw new NullPointerException();
4079     return new MapReduceEntriesToLongTask<K,V>
4080     (null, batchFor(parallelismThreshold), 0, 0, table,
4081     null, transformer, basis, reducer).invoke();
4082 dl 1.84 }
4083    
4084     /**
4085     * Returns the result of accumulating the given transformation
4086     * of all entries using the given reducer to combine values,
4087     * and the given basis as an identity value.
4088     *
4089 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
4090     * needed for this operation to be executed in parallel
4091 dl 1.84 * @param transformer a function returning the transformation
4092     * for an element
4093     * @param basis the identity (initial default value) for the reduction
4094     * @param reducer a commutative associative combining function
4095     * @return the result of accumulating the given transformation
4096     * of all entries
4097 dl 1.102 * @since 1.8
4098 dl 1.84 */
4099 dl 1.102 public int reduceEntriesToInt(long parallelismThreshold,
4100     ObjectToInt<Map.Entry<K,V>> transformer,
4101     int basis,
4102     IntByIntToInt reducer) {
4103     if (transformer == null || reducer == null)
4104     throw new NullPointerException();
4105     return new MapReduceEntriesToIntTask<K,V>
4106     (null, batchFor(parallelismThreshold), 0, 0, table,
4107     null, transformer, basis, reducer).invoke();
4108 dl 1.84 }
4109    
4110    
4111     /* ----------------Views -------------- */
4112    
4113     /**
4114     * Base class for views.
4115     */
4116 dl 1.102 abstract static class CollectionView<K,V,E>
4117     implements Collection<E>, java.io.Serializable {
4118     private static final long serialVersionUID = 7249069246763182397L;
4119 jsr166 1.99 final ConcurrentHashMapV8<K,V> map;
4120 dl 1.102 CollectionView(ConcurrentHashMapV8<K,V> map) { this.map = map; }
4121 dl 1.84
4122     /**
4123     * Returns the map backing this view.
4124     *
4125     * @return the map backing this view
4126     */
4127     public ConcurrentHashMapV8<K,V> getMap() { return map; }
4128    
4129 dl 1.102 /**
4130     * Removes all of the elements from this view, by removing all
4131     * the mappings from the map backing this view.
4132     */
4133     public final void clear() { map.clear(); }
4134     public final int size() { return map.size(); }
4135     public final boolean isEmpty() { return map.isEmpty(); }
4136 dl 1.84
4137     // implementations below rely on concrete classes supplying these
4138 dl 1.102 // abstract methods
4139     /**
4140     * Returns a "weakly consistent" iterator that will never
4141     * throw {@link ConcurrentModificationException}, and
4142     * guarantees to traverse elements as they existed upon
4143     * construction of the iterator, and may (but is not
4144     * guaranteed to) reflect any modifications subsequent to
4145     * construction.
4146     */
4147     public abstract Iterator<E> iterator();
4148 jsr166 1.88 public abstract boolean contains(Object o);
4149     public abstract boolean remove(Object o);
4150 dl 1.84
4151     private static final String oomeMsg = "Required array size too large";
4152 dl 1.75
4153     public final Object[] toArray() {
4154     long sz = map.mappingCount();
4155 dl 1.102 if (sz > MAX_ARRAY_SIZE)
4156 dl 1.75 throw new OutOfMemoryError(oomeMsg);
4157     int n = (int)sz;
4158     Object[] r = new Object[n];
4159     int i = 0;
4160 dl 1.102 for (E e : this) {
4161 dl 1.75 if (i == n) {
4162     if (n >= MAX_ARRAY_SIZE)
4163     throw new OutOfMemoryError(oomeMsg);
4164     if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4165     n = MAX_ARRAY_SIZE;
4166     else
4167     n += (n >>> 1) + 1;
4168     r = Arrays.copyOf(r, n);
4169     }
4170 dl 1.102 r[i++] = e;
4171 dl 1.75 }
4172     return (i == n) ? r : Arrays.copyOf(r, i);
4173     }
4174    
4175 dl 1.102 @SuppressWarnings("unchecked")
4176     public final <T> T[] toArray(T[] a) {
4177 dl 1.75 long sz = map.mappingCount();
4178 dl 1.102 if (sz > MAX_ARRAY_SIZE)
4179 dl 1.75 throw new OutOfMemoryError(oomeMsg);
4180     int m = (int)sz;
4181     T[] r = (a.length >= m) ? a :
4182     (T[])java.lang.reflect.Array
4183     .newInstance(a.getClass().getComponentType(), m);
4184     int n = r.length;
4185     int i = 0;
4186 dl 1.102 for (E e : this) {
4187 dl 1.75 if (i == n) {
4188     if (n >= MAX_ARRAY_SIZE)
4189     throw new OutOfMemoryError(oomeMsg);
4190     if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4191     n = MAX_ARRAY_SIZE;
4192     else
4193     n += (n >>> 1) + 1;
4194     r = Arrays.copyOf(r, n);
4195     }
4196 dl 1.102 r[i++] = (T)e;
4197 dl 1.75 }
4198     if (a == r && i < n) {
4199     r[i] = null; // null-terminate
4200     return r;
4201     }
4202     return (i == n) ? r : Arrays.copyOf(r, i);
4203     }
4204    
4205 dl 1.102 /**
4206     * Returns a string representation of this collection.
4207     * The string representation consists of the string representations
4208     * of the collection's elements in the order they are returned by
4209     * its iterator, enclosed in square brackets ({@code "[]"}).
4210     * Adjacent elements are separated by the characters {@code ", "}
4211     * (comma and space). Elements are converted to strings as by
4212     * {@link String#valueOf(Object)}.
4213     *
4214     * @return a string representation of this collection
4215     */
4216 dl 1.75 public final String toString() {
4217     StringBuilder sb = new StringBuilder();
4218     sb.append('[');
4219 dl 1.102 Iterator<E> it = iterator();
4220 dl 1.75 if (it.hasNext()) {
4221     for (;;) {
4222     Object e = it.next();
4223     sb.append(e == this ? "(this Collection)" : e);
4224     if (!it.hasNext())
4225     break;
4226     sb.append(',').append(' ');
4227     }
4228     }
4229     return sb.append(']').toString();
4230     }
4231    
4232     public final boolean containsAll(Collection<?> c) {
4233     if (c != this) {
4234 dl 1.102 for (Object e : c) {
4235 dl 1.75 if (e == null || !contains(e))
4236     return false;
4237     }
4238     }
4239     return true;
4240     }
4241    
4242     public final boolean removeAll(Collection<?> c) {
4243     boolean modified = false;
4244 dl 1.102 for (Iterator<E> it = iterator(); it.hasNext();) {
4245 dl 1.75 if (c.contains(it.next())) {
4246     it.remove();
4247     modified = true;
4248     }
4249     }
4250     return modified;
4251     }
4252    
4253     public final boolean retainAll(Collection<?> c) {
4254     boolean modified = false;
4255 dl 1.102 for (Iterator<E> it = iterator(); it.hasNext();) {
4256 dl 1.75 if (!c.contains(it.next())) {
4257     it.remove();
4258     modified = true;
4259     }
4260     }
4261     return modified;
4262     }
4263    
4264     }
4265    
4266     /**
4267     * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in
4268     * which additions may optionally be enabled by mapping to a
4269 dl 1.102 * common value. This class cannot be directly instantiated.
4270     * See {@link #keySet() keySet()},
4271     * {@link #keySet(Object) keySet(V)},
4272     * {@link #newKeySet() newKeySet()},
4273     * {@link #newKeySet(int) newKeySet(int)}.
4274     *
4275     * @since 1.8
4276 dl 1.75 */
4277 dl 1.102 public static class KeySetView<K,V> extends CollectionView<K,V,K>
4278 dl 1.82 implements Set<K>, java.io.Serializable {
4279 dl 1.75 private static final long serialVersionUID = 7249069246763182397L;
4280     private final V value;
4281 jsr166 1.99 KeySetView(ConcurrentHashMapV8<K,V> map, V value) { // non-public
4282 dl 1.75 super(map);
4283     this.value = value;
4284     }
4285    
4286     /**
4287     * Returns the default mapped value for additions,
4288     * or {@code null} if additions are not supported.
4289     *
4290     * @return the default mapped value for additions, or {@code null}
4291 jsr166 1.93 * if not supported
4292 dl 1.75 */
4293     public V getMappedValue() { return value; }
4294    
4295 dl 1.102 /**
4296     * {@inheritDoc}
4297     * @throws NullPointerException if the specified key is null
4298     */
4299     public boolean contains(Object o) { return map.containsKey(o); }
4300    
4301     /**
4302     * Removes the key from this map view, by removing the key (and its
4303     * corresponding value) from the backing map. This method does
4304     * nothing if the key is not in the map.
4305     *
4306     * @param o the key to be removed from the backing map
4307     * @return {@code true} if the backing map contained the specified key
4308     * @throws NullPointerException if the specified key is null
4309     */
4310     public boolean remove(Object o) { return map.remove(o) != null; }
4311 dl 1.75
4312 dl 1.102 /**
4313     * @return an iterator over the keys of the backing map
4314     */
4315     public Iterator<K> iterator() {
4316     Node<K,V>[] t;
4317     ConcurrentHashMapV8<K,V> m = map;
4318     int f = (t = m.table) == null ? 0 : t.length;
4319     return new KeyIterator<K,V>(t, f, 0, f, m);
4320     }
4321 dl 1.75
4322     /**
4323 dl 1.102 * Adds the specified key to this set view by mapping the key to
4324     * the default mapped value in the backing map, if defined.
4325 dl 1.75 *
4326 dl 1.102 * @param e key to be added
4327     * @return {@code true} if this set changed as a result of the call
4328     * @throws NullPointerException if the specified key is null
4329     * @throws UnsupportedOperationException if no default mapped value
4330     * for additions was provided
4331 dl 1.75 */
4332     public boolean add(K e) {
4333     V v;
4334     if ((v = value) == null)
4335     throw new UnsupportedOperationException();
4336 dl 1.102 return map.putVal(e, v, true) == null;
4337 dl 1.75 }
4338 dl 1.102
4339     /**
4340     * Adds all of the elements in the specified collection to this set,
4341     * as if by calling {@link #add} on each one.
4342     *
4343     * @param c the elements to be inserted into this set
4344     * @return {@code true} if this set changed as a result of the call
4345     * @throws NullPointerException if the collection or any of its
4346     * elements are {@code null}
4347     * @throws UnsupportedOperationException if no default mapped value
4348     * for additions was provided
4349     */
4350 dl 1.75 public boolean addAll(Collection<? extends K> c) {
4351     boolean added = false;
4352     V v;
4353     if ((v = value) == null)
4354     throw new UnsupportedOperationException();
4355     for (K e : c) {
4356 dl 1.102 if (map.putVal(e, v, true) == null)
4357 dl 1.75 added = true;
4358     }
4359     return added;
4360     }
4361 dl 1.102
4362     public int hashCode() {
4363     int h = 0;
4364     for (K e : this)
4365     h += e.hashCode();
4366     return h;
4367     }
4368    
4369 dl 1.75 public boolean equals(Object o) {
4370     Set<?> c;
4371     return ((o instanceof Set) &&
4372     ((c = (Set<?>)o) == this ||
4373     (containsAll(c) && c.containsAll(this))));
4374     }
4375 dl 1.102
4376     public ConcurrentHashMapSpliterator<K> spliterator() {
4377     Node<K,V>[] t;
4378     ConcurrentHashMapV8<K,V> m = map;
4379     long n = m.sumCount();
4380     int f = (t = m.table) == null ? 0 : t.length;
4381     return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4382     }
4383    
4384     public void forEach(Action<? super K> action) {
4385     if (action == null) throw new NullPointerException();
4386     Node<K,V>[] t;
4387     if ((t = map.table) != null) {
4388     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4389     for (Node<K,V> p; (p = it.advance()) != null; )
4390     action.apply(p.key);
4391     }
4392     }
4393 dl 1.75 }
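    /*
     * Editorial usage sketch (not part of the original source): how the key
     * views defined above are typically obtained and used. The maps and keys
     * below are hypothetical; the methods shown (newKeySet, keySet(V), add,
     * getMappedValue) are the ones documented in this class.
     *
     *   // A Set backed by a ConcurrentHashMapV8, keys mapped to a placeholder value:
     *   KeySetView<String,Boolean> set = ConcurrentHashMapV8.newKeySet();
     *   set.add("alpha");                       // true if "alpha" was absent
     *
     *   // A key view of an existing map, with a default mapped value for additions:
     *   ConcurrentHashMapV8<String,Integer> m = new ConcurrentHashMapV8<String,Integer>();
     *   KeySetView<String,Integer> keys = m.keySet(1);
     *   keys.add("beta");                       // like m.putIfAbsent("beta", 1)
     *   Integer dflt = keys.getMappedValue();   // 1
     *
     *   // A plain key view (no default mapped value): additions are unsupported.
     *   m.keySet().add("gamma");                // throws UnsupportedOperationException
     */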
4394    
4395     /**
4396     * A view of a ConcurrentHashMapV8 as a {@link Collection} of
4397     * values, in which additions are disabled. This class cannot be
4398 jsr166 1.96 * directly instantiated. See {@link #values()}.
4399 dl 1.75 */
4400 dl 1.102 static final class ValuesView<K,V> extends CollectionView<K,V,V>
4401     implements Collection<V>, java.io.Serializable {
4402     private static final long serialVersionUID = 2249069246763182397L;
4403     ValuesView(ConcurrentHashMapV8<K,V> map) { super(map); }
4404     public final boolean contains(Object o) {
4405     return map.containsValue(o);
4406     }
4407    
4408 dl 1.75 public final boolean remove(Object o) {
4409     if (o != null) {
4410 dl 1.102 for (Iterator<V> it = iterator(); it.hasNext();) {
4411 dl 1.75 if (o.equals(it.next())) {
4412     it.remove();
4413     return true;
4414     }
4415     }
4416     }
4417     return false;
4418     }
4419    
4420     public final Iterator<V> iterator() {
4421 dl 1.102 ConcurrentHashMapV8<K,V> m = map;
4422     Node<K,V>[] t;
4423     int f = (t = m.table) == null ? 0 : t.length;
4424     return new ValueIterator<K,V>(t, f, 0, f, m);
4425 dl 1.75 }
4426 dl 1.102
4427 dl 1.75 public final boolean add(V e) {
4428     throw new UnsupportedOperationException();
4429     }
4430     public final boolean addAll(Collection<? extends V> c) {
4431     throw new UnsupportedOperationException();
4432     }
4433    
4434 dl 1.102 public ConcurrentHashMapSpliterator<V> spliterator() {
4435     Node<K,V>[] t;
4436     ConcurrentHashMapV8<K,V> m = map;
4437     long n = m.sumCount();
4438     int f = (t = m.table) == null ? 0 : t.length;
4439     return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4440     }
4441    
4442     public void forEach(Action<? super V> action) {
4443     if (action == null) throw new NullPointerException();
4444     Node<K,V>[] t;
4445     if ((t = map.table) != null) {
4446     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4447     for (Node<K,V> p; (p = it.advance()) != null; )
4448     action.apply(p.val);
4449     }
4450     }
4451 dl 1.70 }
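    /*
     * Editorial usage sketch (not part of the original source): the values
     * view reads and removes through to the backing map, while additions are
     * rejected, as documented above. The map below is hypothetical.
     *
     *   ConcurrentHashMapV8<String,Integer> m = new ConcurrentHashMapV8<String,Integer>();
     *   m.put("a", 1);
     *   Collection<Integer> vals = m.values();
     *   vals.contains(1);   // true, same as m.containsValue(1)
     *   vals.remove(1);     // removes one mapping whose value equals 1
     *   vals.add(2);        // throws UnsupportedOperationException
     */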
4452 dl 1.52
4453 dl 1.70 /**
4454 dl 1.75 * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value)
4455     * entries. This class cannot be directly instantiated. See
4456 jsr166 1.95 * {@link #entrySet()}.
4457 dl 1.70 */
4458 dl 1.102 static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
4459     implements Set<Map.Entry<K,V>>, java.io.Serializable {
4460     private static final long serialVersionUID = 2249069246763182397L;
4461 jsr166 1.99 EntrySetView(ConcurrentHashMapV8<K,V> map) { super(map); }
4462 dl 1.102
4463     public boolean contains(Object o) {
4464 dl 1.75 Object k, v, r; Map.Entry<?,?> e;
4465     return ((o instanceof Map.Entry) &&
4466     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4467     (r = map.get(k)) != null &&
4468     (v = e.getValue()) != null &&
4469     (v == r || v.equals(r)));
4470     }
4471 dl 1.102
4472     public boolean remove(Object o) {
4473 dl 1.75 Object k, v; Map.Entry<?,?> e;
4474     return ((o instanceof Map.Entry) &&
4475     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4476     (v = e.getValue()) != null &&
4477     map.remove(k, v));
4478     }
4479    
4480     /**
4481 dl 1.102 * @return an iterator over the entries of the backing map
4482 dl 1.75 */
4483 dl 1.102 public Iterator<Map.Entry<K,V>> iterator() {
4484     ConcurrentHashMapV8<K,V> m = map;
4485     Node<K,V>[] t;
4486     int f = (t = m.table) == null ? 0 : t.length;
4487     return new EntryIterator<K,V>(t, f, 0, f, m);
4488 dl 1.75 }
4489    
4490 dl 1.102 public boolean add(Entry<K,V> e) {
4491     return map.putVal(e.getKey(), e.getValue(), false) == null;
4492 dl 1.75 }
4493 dl 1.102
4494     public boolean addAll(Collection<? extends Entry<K,V>> c) {
4495 dl 1.75 boolean added = false;
4496     for (Entry<K,V> e : c) {
4497     if (add(e))
4498     added = true;
4499     }
4500     return added;
4501     }
4502 dl 1.52
4503 dl 1.102 public final int hashCode() {
4504     int h = 0;
4505     Node<K,V>[] t;
4506     if ((t = map.table) != null) {
4507     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4508     for (Node<K,V> p; (p = it.advance()) != null; ) {
4509     h += p.hashCode();
4510     }
4511     }
4512     return h;
4513 dl 1.52 }
4514    
4515 dl 1.102 public final boolean equals(Object o) {
4516     Set<?> c;
4517     return ((o instanceof Set) &&
4518     ((c = (Set<?>)o) == this ||
4519     (containsAll(c) && c.containsAll(this))));
4520 dl 1.52 }
4521    
4522 dl 1.102 public ConcurrentHashMapSpliterator<Map.Entry<K,V>> spliterator() {
4523     Node<K,V>[] t;
4524     ConcurrentHashMapV8<K,V> m = map;
4525     long n = m.sumCount();
4526     int f = (t = m.table) == null ? 0 : t.length;
4527     return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
4528 dl 1.52 }
4529    
4530 dl 1.102 public void forEach(Action<? super Map.Entry<K,V>> action) {
4531     if (action == null) throw new NullPointerException();
4532     Node<K,V>[] t;
4533     if ((t = map.table) != null) {
4534     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4535     for (Node<K,V> p; (p = it.advance()) != null; )
4536     action.apply(new MapEntry<K,V>(p.key, p.val, map));
4537     }
4538 dl 1.52 }
4539    
4540 dl 1.102 }
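    /*
     * Editorial usage sketch (not part of the original source): the entry-set
     * view checks, removes, and inserts explicit key/value pairs, writing
     * through to the backing map. AbstractMap.SimpleEntry (java.util) and the
     * map below are used purely for illustration.
     *
     *   ConcurrentHashMapV8<String,Integer> m = new ConcurrentHashMapV8<String,Integer>();
     *   Set<Map.Entry<String,Integer>> entries = m.entrySet();
     *   entries.add(new AbstractMap.SimpleEntry<String,Integer>("d", 4));      // like m.put("d", 4)
     *   entries.contains(new AbstractMap.SimpleEntry<String,Integer>("d", 4)); // true
     *   entries.remove(new AbstractMap.SimpleEntry<String,Integer>("d", 4));   // like m.remove("d", 4)
     */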
4541 dl 1.52
4542 dl 1.102 // -------------------------------------------------------
4543 dl 1.52
4544 dl 1.102 /**
4545     * Base class for bulk tasks. Repeats some fields and code from
4546     * class Traverser, because we need to subclass CountedCompleter.
4547     */
4548     abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
4549     Node<K,V>[] tab; // same as Traverser
4550     Node<K,V> next;
4551     int index;
4552     int baseIndex;
4553     int baseLimit;
4554     final int baseSize;
4555     int batch; // split control
4556    
4557     BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
4558     super(par);
4559     this.batch = b;
4560     this.index = this.baseIndex = i;
4561     if ((this.tab = t) == null)
4562     this.baseSize = this.baseLimit = 0;
4563     else if (par == null)
4564     this.baseSize = this.baseLimit = t.length;
4565     else {
4566     this.baseLimit = f;
4567     this.baseSize = par.baseSize;
4568     }
4569 dl 1.52 }
4570    
4571     /**
4572 dl 1.102 * Same as Traverser version.
4573 dl 1.52 */
4574 dl 1.102 final Node<K,V> advance() {
4575     Node<K,V> e;
4576     if ((e = next) != null)
4577     e = e.next;
4578     for (;;) {
4579     Node<K,V>[] t; int i, n; K ek; // must use locals in checks
4580     if (e != null)
4581     return next = e;
4582     if (baseIndex >= baseLimit || (t = tab) == null ||
4583     (n = t.length) <= (i = index) || i < 0)
4584     return next = null;
4585     if ((e = tabAt(t, index)) != null && e.hash < 0) {
4586     if (e instanceof ForwardingNode) {
4587     tab = ((ForwardingNode<K,V>)e).nextTable;
4588     e = null;
4589     continue;
4590     }
4591     else if (e instanceof TreeBin)
4592     e = ((TreeBin<K,V>)e).first;
4593     else
4594     e = null;
4595     }
4596     if ((index += baseSize) >= n)
4597     index = ++baseIndex; // visit upper slots if present
4598     }
4599 dl 1.52 }
4600     }
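    /*
     * Editorial note (not part of the original source): a rough sketch of the
     * stride in advance() above, assuming a traversal whose baseSize is 16.
     * Within the slot range [baseIndex, baseLimit) the task visits slot
     * baseIndex, then slots baseIndex + 16, baseIndex + 32, ... of any larger
     * (forwarded) table it is redirected into, and only then advances
     * baseIndex. For an unresized table (length == baseSize) the inner stride
     * exits immediately, so the visit order is simply
     * baseIndex, baseIndex + 1, ..., baseLimit - 1.
     */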
4601    
4602     /*
4603     * Task classes. Coded in a regular but ugly format/style to
4604     * simplify checks that each variant differs in the right way from
4605 dl 1.82 * others. The null screenings exist because compilers cannot tell
4606     * that we've already null-checked task arguments, so we force
4607     the simplest hoisted bypass to help avoid convoluted traps.

4608 dl 1.52 */
4609 dl 1.102 @SuppressWarnings("serial")
4610     static final class ForEachKeyTask<K,V>
4611     extends BulkTask<K,V,Void> {
4612     final Action<? super K> action;
4613 dl 1.52 ForEachKeyTask
4614 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4615     Action<? super K> action) {
4616     super(p, b, i, f, t);
4617 dl 1.52 this.action = action;
4618     }
4619 dl 1.102 public final void compute() {
4620     final Action<? super K> action;
4621 dl 1.82 if ((action = this.action) != null) {
4622 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4623     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4624     addToPendingCount(1);
4625     new ForEachKeyTask<K,V>
4626     (this, batch >>>= 1, baseLimit = h, f, tab,
4627     action).fork();
4628     }
4629     for (Node<K,V> p; (p = advance()) != null;)
4630     action.apply(p.key);
4631 dl 1.82 propagateCompletion();
4632     }
4633 dl 1.52 }
4634     }
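    /*
     * Editorial note (not part of the original source): a worked trace of the
     * splitting loop used by ForEachKeyTask above and by the bulk task classes
     * that follow. Suppose a task covers slots [0, 16) with batch == 4. Each
     * iteration halves the remaining range and the batch, forking the upper half:
     *
     *   fork [8, 16) with batch 2;  this task keeps [0, 8)
     *   fork [4, 8)  with batch 1;  this task keeps [0, 4)
     *   fork [2, 4)  with batch 0;  this task keeps [0, 2)
     *
     * The loop then stops (batch == 0) and the task processes its remaining
     * slots sequentially before propagating completion.
     */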
4635    
4636 dl 1.102 @SuppressWarnings("serial")
4637     static final class ForEachValueTask<K,V>
4638     extends BulkTask<K,V,Void> {
4639     final Action<? super V> action;
4640 dl 1.52 ForEachValueTask
4641 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4642     Action<? super V> action) {
4643     super(p, b, i, f, t);
4644 dl 1.52 this.action = action;
4645     }
4646 dl 1.102 public final void compute() {
4647     final Action<? super V> action;
4648 dl 1.82 if ((action = this.action) != null) {
4649 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4650     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4651     addToPendingCount(1);
4652     new ForEachValueTask<K,V>
4653     (this, batch >>>= 1, baseLimit = h, f, tab,
4654     action).fork();
4655     }
4656     for (Node<K,V> p; (p = advance()) != null;)
4657     action.apply(p.val);
4658 dl 1.82 propagateCompletion();
4659     }
4660 dl 1.52 }
4661     }
4662    
4663 dl 1.102 @SuppressWarnings("serial")
4664     static final class ForEachEntryTask<K,V>
4665     extends BulkTask<K,V,Void> {
4666     final Action<? super Entry<K,V>> action;
4667 dl 1.52 ForEachEntryTask
4668 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4669     Action<? super Entry<K,V>> action) {
4670     super(p, b, i, f, t);
4671 dl 1.52 this.action = action;
4672     }
4673 dl 1.102 public final void compute() {
4674     final Action<? super Entry<K,V>> action;
4675 dl 1.82 if ((action = this.action) != null) {
4676 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4677     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4678     addToPendingCount(1);
4679     new ForEachEntryTask<K,V>
4680     (this, batch >>>= 1, baseLimit = h, f, tab,
4681     action).fork();
4682     }
4683     for (Node<K,V> p; (p = advance()) != null; )
4684     action.apply(p);
4685 dl 1.82 propagateCompletion();
4686     }
4687 dl 1.52 }
4688     }
4689    
4690 dl 1.102 @SuppressWarnings("serial")
4691     static final class ForEachMappingTask<K,V>
4692     extends BulkTask<K,V,Void> {
4693     final BiAction<? super K, ? super V> action;
4694 dl 1.52 ForEachMappingTask
4695 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4696     BiAction<? super K,? super V> action) {
4697     super(p, b, i, f, t);
4698 dl 1.52 this.action = action;
4699     }
4700 dl 1.102 public final void compute() {
4701     final BiAction<? super K, ? super V> action;
4702 dl 1.82 if ((action = this.action) != null) {
4703 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4704     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4705     addToPendingCount(1);
4706     new ForEachMappingTask<K,V>
4707     (this, batch >>>= 1, baseLimit = h, f, tab,
4708     action).fork();
4709     }
4710     for (Node<K,V> p; (p = advance()) != null; )
4711     action.apply(p.key, p.val);
4712 dl 1.82 propagateCompletion();
4713     }
4714 dl 1.52 }
4715     }
4716    
4717 dl 1.102 @SuppressWarnings("serial")
4718     static final class ForEachTransformedKeyTask<K,V,U>
4719     extends BulkTask<K,V,Void> {
4720 dl 1.52 final Fun<? super K, ? extends U> transformer;
4721 dl 1.102 final Action<? super U> action;
4722 dl 1.52 ForEachTransformedKeyTask
4723 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4724     Fun<? super K, ? extends U> transformer, Action<? super U> action) {
4725     super(p, b, i, f, t);
4726 dl 1.79 this.transformer = transformer; this.action = action;
4727     }
4728 dl 1.102 public final void compute() {
4729 dl 1.79 final Fun<? super K, ? extends U> transformer;
4730 dl 1.102 final Action<? super U> action;
4731 dl 1.82 if ((transformer = this.transformer) != null &&
4732     (action = this.action) != null) {
4733 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4734     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4735     addToPendingCount(1);
4736 dl 1.82 new ForEachTransformedKeyTask<K,V,U>
4737 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
4738     transformer, action).fork();
4739     }
4740     for (Node<K,V> p; (p = advance()) != null; ) {
4741     U u;
4742     if ((u = transformer.apply(p.key)) != null)
4743 dl 1.82 action.apply(u);
4744     }
4745     propagateCompletion();
4746 dl 1.52 }
4747     }
4748     }
4749    
4750 dl 1.102 @SuppressWarnings("serial")
4751     static final class ForEachTransformedValueTask<K,V,U>
4752     extends BulkTask<K,V,Void> {
4753 dl 1.52 final Fun<? super V, ? extends U> transformer;
4754 dl 1.102 final Action<? super U> action;
4755 dl 1.52 ForEachTransformedValueTask
4756 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4757     Fun<? super V, ? extends U> transformer, Action<? super U> action) {
4758     super(p, b, i, f, t);
4759 dl 1.79 this.transformer = transformer; this.action = action;
4760     }
4761 dl 1.102 public final void compute() {
4762 dl 1.79 final Fun<? super V, ? extends U> transformer;
4763 dl 1.102 final Action<? super U> action;
4764 dl 1.82 if ((transformer = this.transformer) != null &&
4765     (action = this.action) != null) {
4766 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4767     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4768     addToPendingCount(1);
4769 dl 1.82 new ForEachTransformedValueTask<K,V,U>
4770 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
4771     transformer, action).fork();
4772     }
4773     for (Node<K,V> p; (p = advance()) != null; ) {
4774     U u;
4775     if ((u = transformer.apply(p.val)) != null)
4776 dl 1.82 action.apply(u);
4777     }
4778     propagateCompletion();
4779 dl 1.52 }
4780     }
4781     }
4782    
4783 dl 1.102 @SuppressWarnings("serial")
4784     static final class ForEachTransformedEntryTask<K,V,U>
4785     extends BulkTask<K,V,Void> {
4786 dl 1.52 final Fun<Map.Entry<K,V>, ? extends U> transformer;
4787 dl 1.102 final Action<? super U> action;
4788 dl 1.52 ForEachTransformedEntryTask
4789 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4790     Fun<Map.Entry<K,V>, ? extends U> transformer, Action<? super U> action) {
4791     super(p, b, i, f, t);
4792 dl 1.79 this.transformer = transformer; this.action = action;
4793     }
4794 dl 1.102 public final void compute() {
4795 dl 1.79 final Fun<Map.Entry<K,V>, ? extends U> transformer;
4796 dl 1.102 final Action<? super U> action;
4797 dl 1.82 if ((transformer = this.transformer) != null &&
4798     (action = this.action) != null) {
4799 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4800     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4801     addToPendingCount(1);
4802 dl 1.82 new ForEachTransformedEntryTask<K,V,U>
4803 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
4804     transformer, action).fork();
4805     }
4806     for (Node<K,V> p; (p = advance()) != null; ) {
4807     U u;
4808     if ((u = transformer.apply(p)) != null)
4809 dl 1.82 action.apply(u);
4810     }
4811     propagateCompletion();
4812 dl 1.52 }
4813     }
4814     }
4815    
4816 dl 1.102 @SuppressWarnings("serial")
4817     static final class ForEachTransformedMappingTask<K,V,U>
4818     extends BulkTask<K,V,Void> {
4819 dl 1.52 final BiFun<? super K, ? super V, ? extends U> transformer;
4820 dl 1.102 final Action<? super U> action;
4821 dl 1.52 ForEachTransformedMappingTask
4822 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4823 dl 1.52 BiFun<? super K, ? super V, ? extends U> transformer,
4824 dl 1.102 Action<? super U> action) {
4825     super(p, b, i, f, t);
4826 dl 1.79 this.transformer = transformer; this.action = action;
4827 dl 1.52 }
4828 dl 1.102 public final void compute() {
4829 dl 1.79 final BiFun<? super K, ? super V, ? extends U> transformer;
4830 dl 1.102 final Action<? super U> action;
4831 dl 1.82 if ((transformer = this.transformer) != null &&
4832     (action = this.action) != null) {
4833 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4834     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4835     addToPendingCount(1);
4836 dl 1.82 new ForEachTransformedMappingTask<K,V,U>
4837 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
4838     transformer, action).fork();
4839     }
4840     for (Node<K,V> p; (p = advance()) != null; ) {
4841     U u;
4842     if ((u = transformer.apply(p.key, p.val)) != null)
4843 dl 1.82 action.apply(u);
4844     }
4845     propagateCompletion();
4846 dl 1.52 }
4847     }
4848     }
4849    
4850 dl 1.102 @SuppressWarnings("serial")
4851     static final class SearchKeysTask<K,V,U>
4852     extends BulkTask<K,V,U> {
4853 dl 1.52 final Fun<? super K, ? extends U> searchFunction;
4854     final AtomicReference<U> result;
4855     SearchKeysTask
4856 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4857 dl 1.52 Fun<? super K, ? extends U> searchFunction,
4858     AtomicReference<U> result) {
4859 dl 1.102 super(p, b, i, f, t);
4860 dl 1.52 this.searchFunction = searchFunction; this.result = result;
4861     }
4862 dl 1.79 public final U getRawResult() { return result.get(); }
4863 dl 1.102 public final void compute() {
4864 dl 1.79 final Fun<? super K, ? extends U> searchFunction;
4865     final AtomicReference<U> result;
4866 dl 1.82 if ((searchFunction = this.searchFunction) != null &&
4867     (result = this.result) != null) {
4868 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4869     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4870 dl 1.82 if (result.get() != null)
4871     return;
4872 dl 1.102 addToPendingCount(1);
4873 dl 1.82 new SearchKeysTask<K,V,U>
4874 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
4875     searchFunction, result).fork();
4876 dl 1.61 }
4877 dl 1.82 while (result.get() == null) {
4878     U u;
4879 dl 1.102 Node<K,V> p;
4880     if ((p = advance()) == null) {
4881 dl 1.82 propagateCompletion();
4882     break;
4883     }
4884 dl 1.102 if ((u = searchFunction.apply(p.key)) != null) {
4885 dl 1.82 if (result.compareAndSet(null, u))
4886     quietlyCompleteRoot();
4887     break;
4888     }
4889 dl 1.52 }
4890     }
4891     }
4892     }
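    /*
     * Editorial note (not part of the original source): SearchKeysTask above
     * and the search task variants that follow short-circuit through the
     * shared AtomicReference. The first task whose search function returns
     * non-null publishes it with result.compareAndSet(null, u) and quietly
     * completes the root; every other task polls result.get() before splitting
     * and before each element, returning as soon as a result has been found.
     */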
4893    
4894 dl 1.102 @SuppressWarnings("serial")
4895     static final class SearchValuesTask<K,V,U>
4896     extends BulkTask<K,V,U> {
4897 dl 1.52 final Fun<? super V, ? extends U> searchFunction;
4898     final AtomicReference<U> result;
4899     SearchValuesTask
4900 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4901 dl 1.52 Fun<? super V, ? extends U> searchFunction,
4902     AtomicReference<U> result) {
4903 dl 1.102 super(p, b, i, f, t);
4904 dl 1.52 this.searchFunction = searchFunction; this.result = result;
4905     }
4906 dl 1.79 public final U getRawResult() { return result.get(); }
4907 dl 1.102 public final void compute() {
4908 dl 1.79 final Fun<? super V, ? extends U> searchFunction;
4909     final AtomicReference<U> result;
4910 dl 1.82 if ((searchFunction = this.searchFunction) != null &&
4911     (result = this.result) != null) {
4912 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4913     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4914 dl 1.82 if (result.get() != null)
4915     return;
4916 dl 1.102 addToPendingCount(1);
4917 dl 1.82 new SearchValuesTask<K,V,U>
4918 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
4919     searchFunction, result).fork();
4920 dl 1.61 }
4921 dl 1.82 while (result.get() == null) {
4922 dl 1.102 U u;
4923     Node<K,V> p;
4924     if ((p = advance()) == null) {
4925 dl 1.82 propagateCompletion();
4926     break;
4927     }
4928 dl 1.102 if ((u = searchFunction.apply(p.val)) != null) {
4929 dl 1.82 if (result.compareAndSet(null, u))
4930     quietlyCompleteRoot();
4931     break;
4932     }
4933 dl 1.52 }
4934     }
4935     }
4936     }
4937    
4938 dl 1.102 @SuppressWarnings("serial")
4939     static final class SearchEntriesTask<K,V,U>
4940     extends BulkTask<K,V,U> {
4941 dl 1.52 final Fun<Entry<K,V>, ? extends U> searchFunction;
4942     final AtomicReference<U> result;
4943     SearchEntriesTask
4944 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4945 dl 1.52 Fun<Entry<K,V>, ? extends U> searchFunction,
4946     AtomicReference<U> result) {
4947 dl 1.102 super(p, b, i, f, t);
4948 dl 1.52 this.searchFunction = searchFunction; this.result = result;
4949     }
4950 dl 1.79 public final U getRawResult() { return result.get(); }
4951 dl 1.102 public final void compute() {
4952 dl 1.79 final Fun<Entry<K,V>, ? extends U> searchFunction;
4953     final AtomicReference<U> result;
4954 dl 1.82 if ((searchFunction = this.searchFunction) != null &&
4955     (result = this.result) != null) {
4956 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4957     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4958 dl 1.82 if (result.get() != null)
4959     return;
4960 dl 1.102 addToPendingCount(1);
4961 dl 1.82 new SearchEntriesTask<K,V,U>
4962 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
4963     searchFunction, result).fork();
4964 dl 1.61 }
4965 dl 1.82 while (result.get() == null) {
4966 dl 1.102 U u;
4967     Node<K,V> p;
4968     if ((p = advance()) == null) {
4969 dl 1.82 propagateCompletion();
4970     break;
4971     }
4972 dl 1.102 if ((u = searchFunction.apply(p)) != null) {
4973 dl 1.82 if (result.compareAndSet(null, u))
4974     quietlyCompleteRoot();
4975     return;
4976     }
4977 dl 1.52 }
4978     }
4979     }
4980     }
4981    
4982 dl 1.102 @SuppressWarnings("serial")
4983     static final class SearchMappingsTask<K,V,U>
4984     extends BulkTask<K,V,U> {
4985 dl 1.52 final BiFun<? super K, ? super V, ? extends U> searchFunction;
4986     final AtomicReference<U> result;
4987     SearchMappingsTask
4988 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4989 dl 1.52 BiFun<? super K, ? super V, ? extends U> searchFunction,
4990     AtomicReference<U> result) {
4991 dl 1.102 super(p, b, i, f, t);
4992 dl 1.52 this.searchFunction = searchFunction; this.result = result;
4993     }
4994 dl 1.79 public final U getRawResult() { return result.get(); }
4995 dl 1.102 public final void compute() {
4996 dl 1.79 final BiFun<? super K, ? super V, ? extends U> searchFunction;
4997     final AtomicReference<U> result;
4998 dl 1.82 if ((searchFunction = this.searchFunction) != null &&
4999     (result = this.result) != null) {
5000 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5001     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5002 dl 1.82 if (result.get() != null)
5003     return;
5004 dl 1.102 addToPendingCount(1);
5005 dl 1.82 new SearchMappingsTask<K,V,U>
5006 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5007     searchFunction, result).fork();
5008 dl 1.61 }
5009 dl 1.82 while (result.get() == null) {
5010 dl 1.102 U u;
5011     Node<K,V> p;
5012     if ((p = advance()) == null) {
5013 dl 1.82 propagateCompletion();
5014     break;
5015     }
5016 dl 1.102 if ((u = searchFunction.apply(p.key, p.val)) != null) {
5017 dl 1.82 if (result.compareAndSet(null, u))
5018     quietlyCompleteRoot();
5019     break;
5020     }
5021 dl 1.52 }
5022     }
5023     }
5024     }
5025    
5026 dl 1.102 @SuppressWarnings("serial")
5027     static final class ReduceKeysTask<K,V>
5028     extends BulkTask<K,V,K> {
5029 dl 1.52 final BiFun<? super K, ? super K, ? extends K> reducer;
5030     K result;
5031 dl 1.61 ReduceKeysTask<K,V> rights, nextRight;
5032 dl 1.52 ReduceKeysTask
5033 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5034 dl 1.61 ReduceKeysTask<K,V> nextRight,
5035 dl 1.52 BiFun<? super K, ? super K, ? extends K> reducer) {
5036 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5037 dl 1.52 this.reducer = reducer;
5038     }
5039 dl 1.79 public final K getRawResult() { return result; }
5040 dl 1.102 public final void compute() {
5041 dl 1.82 final BiFun<? super K, ? super K, ? extends K> reducer;
5042     if ((reducer = this.reducer) != null) {
5043 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5044     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5045     addToPendingCount(1);
5046 dl 1.82 (rights = new ReduceKeysTask<K,V>
5047 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5048     rights, reducer)).fork();
5049     }
5050 dl 1.82 K r = null;
5051 dl 1.102 for (Node<K,V> p; (p = advance()) != null; ) {
5052     K u = p.key;
5053     r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
5054 dl 1.82 }
5055     result = r;
5056     CountedCompleter<?> c;
5057     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5058 dl 1.102 @SuppressWarnings("unchecked") ReduceKeysTask<K,V>
5059 dl 1.82 t = (ReduceKeysTask<K,V>)c,
5060     s = t.rights;
5061     while (s != null) {
5062     K tr, sr;
5063     if ((sr = s.result) != null)
5064     t.result = (((tr = t.result) == null) ? sr :
5065     reducer.apply(tr, sr));
5066     s = t.rights = s.nextRight;
5067     }
5068 dl 1.52 }
5069 dl 1.71 }
5070 dl 1.52 }
5071     }
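    /*
     * Editorial note (not part of the original source): a sketch of how
     * ReduceKeysTask above and the reduce/map-reduce variants that follow
     * combine partial results. Each forked subtask is pushed onto the parent's
     * "rights" chain (linked through nextRight). When a task finishes its own
     * slots it stores its partial reduction in result, and the
     * firstComplete()/nextComplete() walk lets the last-completing task at
     * each level fold its rights' results into its own with the reducer, so
     * by the time completion reaches the root its result covers the whole table.
     */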
5072    
5073 dl 1.102 @SuppressWarnings("serial")
5074     static final class ReduceValuesTask<K,V>
5075     extends BulkTask<K,V,V> {
5076 dl 1.52 final BiFun<? super V, ? super V, ? extends V> reducer;
5077     V result;
5078 dl 1.61 ReduceValuesTask<K,V> rights, nextRight;
5079 dl 1.52 ReduceValuesTask
5080 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5081 dl 1.61 ReduceValuesTask<K,V> nextRight,
5082 dl 1.52 BiFun<? super V, ? super V, ? extends V> reducer) {
5083 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5084 dl 1.52 this.reducer = reducer;
5085     }
5086 dl 1.79 public final V getRawResult() { return result; }
5087 dl 1.102 public final void compute() {
5088 dl 1.82 final BiFun<? super V, ? super V, ? extends V> reducer;
5089     if ((reducer = this.reducer) != null) {
5090 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5091     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5092     addToPendingCount(1);
5093 dl 1.82 (rights = new ReduceValuesTask<K,V>
5094 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5095     rights, reducer)).fork();
5096     }
5097 dl 1.82 V r = null;
5098 dl 1.102 for (Node<K,V> p; (p = advance()) != null; ) {
5099     V v = p.val;
5100     r = (r == null) ? v : reducer.apply(r, v);
5101 dl 1.82 }
5102     result = r;
5103     CountedCompleter<?> c;
5104     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5105 dl 1.102 @SuppressWarnings("unchecked") ReduceValuesTask<K,V>
5106 dl 1.82 t = (ReduceValuesTask<K,V>)c,
5107     s = t.rights;
5108     while (s != null) {
5109     V tr, sr;
5110     if ((sr = s.result) != null)
5111     t.result = (((tr = t.result) == null) ? sr :
5112     reducer.apply(tr, sr));
5113     s = t.rights = s.nextRight;
5114     }
5115 dl 1.52 }
5116     }
5117     }
5118     }
5119    
5120 dl 1.102 @SuppressWarnings("serial")
5121     static final class ReduceEntriesTask<K,V>
5122     extends BulkTask<K,V,Map.Entry<K,V>> {
5123 dl 1.52 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5124     Map.Entry<K,V> result;
5125 dl 1.61 ReduceEntriesTask<K,V> rights, nextRight;
5126 dl 1.52 ReduceEntriesTask
5127 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5128 dl 1.63 ReduceEntriesTask<K,V> nextRight,
5129 dl 1.52 BiFun<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5130 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5131 dl 1.52 this.reducer = reducer;
5132     }
5133 dl 1.79 public final Map.Entry<K,V> getRawResult() { return result; }
5134 dl 1.102 public final void compute() {
5135 dl 1.82 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5136     if ((reducer = this.reducer) != null) {
5137 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5138     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5139     addToPendingCount(1);
5140 dl 1.82 (rights = new ReduceEntriesTask<K,V>
5141 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5142     rights, reducer)).fork();
5143     }
5144 dl 1.82 Map.Entry<K,V> r = null;
5145 dl 1.102 for (Node<K,V> p; (p = advance()) != null; )
5146     r = (r == null) ? p : reducer.apply(r, p);
5147 dl 1.82 result = r;
5148     CountedCompleter<?> c;
5149     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5150 dl 1.102 @SuppressWarnings("unchecked") ReduceEntriesTask<K,V>
5151 dl 1.82 t = (ReduceEntriesTask<K,V>)c,
5152     s = t.rights;
5153     while (s != null) {
5154     Map.Entry<K,V> tr, sr;
5155     if ((sr = s.result) != null)
5156     t.result = (((tr = t.result) == null) ? sr :
5157     reducer.apply(tr, sr));
5158     s = t.rights = s.nextRight;
5159     }
5160 dl 1.52 }
5161     }
5162     }
5163     }
5164    
5165 dl 1.102 @SuppressWarnings("serial")
5166     static final class MapReduceKeysTask<K,V,U>
5167     extends BulkTask<K,V,U> {
5168 dl 1.52 final Fun<? super K, ? extends U> transformer;
5169     final BiFun<? super U, ? super U, ? extends U> reducer;
5170     U result;
5171 dl 1.61 MapReduceKeysTask<K,V,U> rights, nextRight;
5172 dl 1.52 MapReduceKeysTask
5173 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5174 dl 1.61 MapReduceKeysTask<K,V,U> nextRight,
5175 dl 1.52 Fun<? super K, ? extends U> transformer,
5176     BiFun<? super U, ? super U, ? extends U> reducer) {
5177 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5178 dl 1.52 this.transformer = transformer;
5179     this.reducer = reducer;
5180     }
5181 dl 1.79 public final U getRawResult() { return result; }
5182 dl 1.102 public final void compute() {
5183 dl 1.82 final Fun<? super K, ? extends U> transformer;
5184     final BiFun<? super U, ? super U, ? extends U> reducer;
5185     if ((transformer = this.transformer) != null &&
5186     (reducer = this.reducer) != null) {
5187 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5188     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5189     addToPendingCount(1);
5190 dl 1.82 (rights = new MapReduceKeysTask<K,V,U>
5191 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5192     rights, transformer, reducer)).fork();
5193     }
5194     U r = null;
5195     for (Node<K,V> p; (p = advance()) != null; ) {
5196     U u;
5197     if ((u = transformer.apply(p.key)) != null)
5198 dl 1.82 r = (r == null) ? u : reducer.apply(r, u);
5199     }
5200     result = r;
5201     CountedCompleter<?> c;
5202     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5203 dl 1.102 @SuppressWarnings("unchecked") MapReduceKeysTask<K,V,U>
5204 dl 1.82 t = (MapReduceKeysTask<K,V,U>)c,
5205     s = t.rights;
5206     while (s != null) {
5207     U tr, sr;
5208     if ((sr = s.result) != null)
5209     t.result = (((tr = t.result) == null) ? sr :
5210     reducer.apply(tr, sr));
5211     s = t.rights = s.nextRight;
5212     }
5213 dl 1.52 }
5214     }
5215     }
5216     }
5217    
5218 dl 1.102 @SuppressWarnings("serial")
5219     static final class MapReduceValuesTask<K,V,U>
5220     extends BulkTask<K,V,U> {
5221 dl 1.52 final Fun<? super V, ? extends U> transformer;
5222     final BiFun<? super U, ? super U, ? extends U> reducer;
5223     U result;
5224 dl 1.61 MapReduceValuesTask<K,V,U> rights, nextRight;
5225 dl 1.52 MapReduceValuesTask
5226 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5227 dl 1.61 MapReduceValuesTask<K,V,U> nextRight,
5228 dl 1.52 Fun<? super V, ? extends U> transformer,
5229     BiFun<? super U, ? super U, ? extends U> reducer) {
5230 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5231 dl 1.52 this.transformer = transformer;
5232     this.reducer = reducer;
5233     }
5234 dl 1.79 public final U getRawResult() { return result; }
5235 dl 1.102 public final void compute() {
5236 dl 1.82 final Fun<? super V, ? extends U> transformer;
5237     final BiFun<? super U, ? super U, ? extends U> reducer;
5238     if ((transformer = this.transformer) != null &&
5239     (reducer = this.reducer) != null) {
5240 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5241     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5242     addToPendingCount(1);
5243 dl 1.82 (rights = new MapReduceValuesTask<K,V,U>
5244 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5245     rights, transformer, reducer)).fork();
5246     }
5247     U r = null;
5248     for (Node<K,V> p; (p = advance()) != null; ) {
5249     U u;
5250     if ((u = transformer.apply(p.val)) != null)
5251 dl 1.82 r = (r == null) ? u : reducer.apply(r, u);
5252     }
5253     result = r;
5254     CountedCompleter<?> c;
5255     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5256 dl 1.102 @SuppressWarnings("unchecked") MapReduceValuesTask<K,V,U>
5257 dl 1.82 t = (MapReduceValuesTask<K,V,U>)c,
5258     s = t.rights;
5259     while (s != null) {
5260     U tr, sr;
5261     if ((sr = s.result) != null)
5262     t.result = (((tr = t.result) == null) ? sr :
5263     reducer.apply(tr, sr));
5264     s = t.rights = s.nextRight;
5265     }
5266 dl 1.52 }
5267 dl 1.71 }
5268 dl 1.52 }
5269     }
5270    
5271 dl 1.102 @SuppressWarnings("serial")
5272     static final class MapReduceEntriesTask<K,V,U>
5273     extends BulkTask<K,V,U> {
5274 dl 1.52 final Fun<Map.Entry<K,V>, ? extends U> transformer;
5275     final BiFun<? super U, ? super U, ? extends U> reducer;
5276     U result;
5277 dl 1.61 MapReduceEntriesTask<K,V,U> rights, nextRight;
5278 dl 1.52 MapReduceEntriesTask
5279 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5280 dl 1.61 MapReduceEntriesTask<K,V,U> nextRight,
5281 dl 1.52 Fun<Map.Entry<K,V>, ? extends U> transformer,
5282     BiFun<? super U, ? super U, ? extends U> reducer) {
5283 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5284 dl 1.52 this.transformer = transformer;
5285     this.reducer = reducer;
5286     }
5287 dl 1.79 public final U getRawResult() { return result; }
5288 dl 1.102 public final void compute() {
5289 dl 1.82 final Fun<Map.Entry<K,V>, ? extends U> transformer;
5290     final BiFun<? super U, ? super U, ? extends U> reducer;
5291     if ((transformer = this.transformer) != null &&
5292     (reducer = this.reducer) != null) {
5293 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5294     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5295     addToPendingCount(1);
5296 dl 1.82 (rights = new MapReduceEntriesTask<K,V,U>
5297 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5298     rights, transformer, reducer)).fork();
5299     }
5300     U r = null;
5301     for (Node<K,V> p; (p = advance()) != null; ) {
5302     U u;
5303     if ((u = transformer.apply(p)) != null)
5304 dl 1.82 r = (r == null) ? u : reducer.apply(r, u);
5305     }
5306     result = r;
5307     CountedCompleter<?> c;
5308     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5309 dl 1.102 @SuppressWarnings("unchecked") MapReduceEntriesTask<K,V,U>
5310 dl 1.82 t = (MapReduceEntriesTask<K,V,U>)c,
5311     s = t.rights;
5312     while (s != null) {
5313     U tr, sr;
5314     if ((sr = s.result) != null)
5315     t.result = (((tr = t.result) == null) ? sr :
5316     reducer.apply(tr, sr));
5317     s = t.rights = s.nextRight;
5318     }
5319 dl 1.52 }
5320     }
5321     }
5322     }
5323    
5324 dl 1.102 @SuppressWarnings("serial")
5325     static final class MapReduceMappingsTask<K,V,U>
5326     extends BulkTask<K,V,U> {
5327 dl 1.52 final BiFun<? super K, ? super V, ? extends U> transformer;
5328     final BiFun<? super U, ? super U, ? extends U> reducer;
5329     U result;
5330 dl 1.61 MapReduceMappingsTask<K,V,U> rights, nextRight;
5331 dl 1.52 MapReduceMappingsTask
5332 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5333 dl 1.61 MapReduceMappingsTask<K,V,U> nextRight,
5334 dl 1.52 BiFun<? super K, ? super V, ? extends U> transformer,
5335     BiFun<? super U, ? super U, ? extends U> reducer) {
5336 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5337 dl 1.52 this.transformer = transformer;
5338     this.reducer = reducer;
5339     }
5340 dl 1.79 public final U getRawResult() { return result; }
5341 dl 1.102 public final void compute() {
5342 dl 1.82 final BiFun<? super K, ? super V, ? extends U> transformer;
5343     final BiFun<? super U, ? super U, ? extends U> reducer;
5344     if ((transformer = this.transformer) != null &&
5345     (reducer = this.reducer) != null) {
5346 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5347     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5348     addToPendingCount(1);
5349 dl 1.82 (rights = new MapReduceMappingsTask<K,V,U>
5350 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5351     rights, transformer, reducer)).fork();
5352     }
5353     U r = null;
5354     for (Node<K,V> p; (p = advance()) != null; ) {
5355     U u;
5356     if ((u = transformer.apply(p.key, p.val)) != null)
5357 dl 1.82 r = (r == null) ? u : reducer.apply(r, u);
5358     }
5359     result = r;
5360     CountedCompleter<?> c;
5361     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5362 dl 1.102 @SuppressWarnings("unchecked") MapReduceMappingsTask<K,V,U>
5363 dl 1.82 t = (MapReduceMappingsTask<K,V,U>)c,
5364     s = t.rights;
5365     while (s != null) {
5366     U tr, sr;
5367     if ((sr = s.result) != null)
5368     t.result = (((tr = t.result) == null) ? sr :
5369     reducer.apply(tr, sr));
5370     s = t.rights = s.nextRight;
5371     }
5372 dl 1.52 }
5373 dl 1.71 }
5374 dl 1.52 }
5375     }
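    /*
     * Editorial note (not part of the original source): the primitive-result
     * variants that follow differ from the object-result reductions above only
     * in that they start from the caller-supplied identity 'basis' and fold
     * every element through the reducer, rather than skipping elements whose
     * transformation is null.
     */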
5376    
5377 dl 1.102 @SuppressWarnings("serial")
5378     static final class MapReduceKeysToDoubleTask<K,V>
5379     extends BulkTask<K,V,Double> {
5380 dl 1.52 final ObjectToDouble<? super K> transformer;
5381     final DoubleByDoubleToDouble reducer;
5382     final double basis;
5383     double result;
5384 dl 1.61 MapReduceKeysToDoubleTask<K,V> rights, nextRight;
5385 dl 1.52 MapReduceKeysToDoubleTask
5386 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5387 dl 1.61 MapReduceKeysToDoubleTask<K,V> nextRight,
5388 dl 1.52 ObjectToDouble<? super K> transformer,
5389     double basis,
5390     DoubleByDoubleToDouble reducer) {
5391 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5392 dl 1.52 this.transformer = transformer;
5393     this.basis = basis; this.reducer = reducer;
5394     }
5395 dl 1.79 public final Double getRawResult() { return result; }
5396 dl 1.102 public final void compute() {
5397 dl 1.82 final ObjectToDouble<? super K> transformer;
5398     final DoubleByDoubleToDouble reducer;
5399     if ((transformer = this.transformer) != null &&
5400     (reducer = this.reducer) != null) {
5401     double r = this.basis;
5402 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5403     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5404     addToPendingCount(1);
5405 dl 1.82 (rights = new MapReduceKeysToDoubleTask<K,V>
5406 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5407     rights, transformer, r, reducer)).fork();
5408     }
5409     for (Node<K,V> p; (p = advance()) != null; )
5410     r = reducer.apply(r, transformer.apply(p.key));
5411 dl 1.82 result = r;
5412     CountedCompleter<?> c;
5413     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5414 dl 1.102 @SuppressWarnings("unchecked") MapReduceKeysToDoubleTask<K,V>
5415 dl 1.82 t = (MapReduceKeysToDoubleTask<K,V>)c,
5416     s = t.rights;
5417     while (s != null) {
5418     t.result = reducer.apply(t.result, s.result);
5419     s = t.rights = s.nextRight;
5420     }
5421 dl 1.52 }
5422 dl 1.71 }
5423 dl 1.52 }
5424     }
5425    
5426 dl 1.102 @SuppressWarnings("serial")
5427     static final class MapReduceValuesToDoubleTask<K,V>
5428     extends BulkTask<K,V,Double> {
5429 dl 1.52 final ObjectToDouble<? super V> transformer;
5430     final DoubleByDoubleToDouble reducer;
5431     final double basis;
5432     double result;
5433 dl 1.61 MapReduceValuesToDoubleTask<K,V> rights, nextRight;
5434 dl 1.52 MapReduceValuesToDoubleTask
5435 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5436 dl 1.61 MapReduceValuesToDoubleTask<K,V> nextRight,
5437 dl 1.52 ObjectToDouble<? super V> transformer,
5438     double basis,
5439     DoubleByDoubleToDouble reducer) {
5440 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5441 dl 1.52 this.transformer = transformer;
5442     this.basis = basis; this.reducer = reducer;
5443     }
5444 dl 1.79 public final Double getRawResult() { return result; }
5445 dl 1.102 public final void compute() {
5446 dl 1.82 final ObjectToDouble<? super V> transformer;
5447     final DoubleByDoubleToDouble reducer;
5448     if ((transformer = this.transformer) != null &&
5449     (reducer = this.reducer) != null) {
5450     double r = this.basis;
5451 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5452     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5453     addToPendingCount(1);
5454 dl 1.82 (rights = new MapReduceValuesToDoubleTask<K,V>
5455 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5456     rights, transformer, r, reducer)).fork();
5457     }
5458     for (Node<K,V> p; (p = advance()) != null; )
5459     r = reducer.apply(r, transformer.apply(p.val));
5460 dl 1.82 result = r;
5461     CountedCompleter<?> c;
5462     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5463 dl 1.102 @SuppressWarnings("unchecked") MapReduceValuesToDoubleTask<K,V>
5464 dl 1.82 t = (MapReduceValuesToDoubleTask<K,V>)c,
5465     s = t.rights;
5466     while (s != null) {
5467     t.result = reducer.apply(t.result, s.result);
5468     s = t.rights = s.nextRight;
5469     }
5470 dl 1.52 }
5471 dl 1.71 }
5472 dl 1.52 }
5473     }
5474    
5475 dl 1.102 @SuppressWarnings("serial")
5476     static final class MapReduceEntriesToDoubleTask<K,V>
5477     extends BulkTask<K,V,Double> {
5478 dl 1.52 final ObjectToDouble<Map.Entry<K,V>> transformer;
5479     final DoubleByDoubleToDouble reducer;
5480     final double basis;
5481     double result;
5482 dl 1.61 MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
5483 dl 1.52 MapReduceEntriesToDoubleTask
5484 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5485 dl 1.61 MapReduceEntriesToDoubleTask<K,V> nextRight,
5486 dl 1.52 ObjectToDouble<Map.Entry<K,V>> transformer,
5487     double basis,
5488     DoubleByDoubleToDouble reducer) {
5489 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5490 dl 1.52 this.transformer = transformer;
5491     this.basis = basis; this.reducer = reducer;
5492     }
5493 dl 1.79 public final Double getRawResult() { return result; }
5494 dl 1.102 public final void compute() {
5495 dl 1.82 final ObjectToDouble<Map.Entry<K,V>> transformer;
5496     final DoubleByDoubleToDouble reducer;
5497     if ((transformer = this.transformer) != null &&
5498     (reducer = this.reducer) != null) {
5499     double r = this.basis;
5500 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5501     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5502     addToPendingCount(1);
5503 dl 1.82 (rights = new MapReduceEntriesToDoubleTask<K,V>
5504 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5505     rights, transformer, r, reducer)).fork();
5506     }
5507     for (Node<K,V> p; (p = advance()) != null; )
5508     r = reducer.apply(r, transformer.apply(p));
5509 dl 1.82 result = r;
5510     CountedCompleter<?> c;
5511     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5512 dl 1.102 @SuppressWarnings("unchecked") MapReduceEntriesToDoubleTask<K,V>
5513 dl 1.82 t = (MapReduceEntriesToDoubleTask<K,V>)c,
5514     s = t.rights;
5515     while (s != null) {
5516     t.result = reducer.apply(t.result, s.result);
5517     s = t.rights = s.nextRight;
5518     }
5519 dl 1.52 }
5520 dl 1.71 }
5521 dl 1.52 }
5522     }
5523    
5524 dl 1.102 @SuppressWarnings("serial")
5525     static final class MapReduceMappingsToDoubleTask<K,V>
5526     extends BulkTask<K,V,Double> {
5527 dl 1.52 final ObjectByObjectToDouble<? super K, ? super V> transformer;
5528     final DoubleByDoubleToDouble reducer;
5529     final double basis;
5530     double result;
5531 dl 1.61 MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
5532 dl 1.52 MapReduceMappingsToDoubleTask
5533 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5534 dl 1.61 MapReduceMappingsToDoubleTask<K,V> nextRight,
5535 dl 1.52 ObjectByObjectToDouble<? super K, ? super V> transformer,
5536     double basis,
5537     DoubleByDoubleToDouble reducer) {
5538 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5539 dl 1.52 this.transformer = transformer;
5540     this.basis = basis; this.reducer = reducer;
5541     }
5542 dl 1.79 public final Double getRawResult() { return result; }
5543 dl 1.102 public final void compute() {
5544 dl 1.82 final ObjectByObjectToDouble<? super K, ? super V> transformer;
5545     final DoubleByDoubleToDouble reducer;
5546     if ((transformer = this.transformer) != null &&
5547     (reducer = this.reducer) != null) {
5548     double r = this.basis;
5549 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5550     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5551     addToPendingCount(1);
5552 dl 1.82 (rights = new MapReduceMappingsToDoubleTask<K,V>
5553 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5554     rights, transformer, r, reducer)).fork();
5555     }
5556     for (Node<K,V> p; (p = advance()) != null; )
5557     r = reducer.apply(r, transformer.apply(p.key, p.val));
5558 dl 1.82 result = r;
5559     CountedCompleter<?> c;
5560     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5561 dl 1.102 @SuppressWarnings("unchecked") MapReduceMappingsToDoubleTask<K,V>
5562 dl 1.82 t = (MapReduceMappingsToDoubleTask<K,V>)c,
5563     s = t.rights;
5564     while (s != null) {
5565     t.result = reducer.apply(t.result, s.result);
5566     s = t.rights = s.nextRight;
5567     }
5568 dl 1.52 }
5569 dl 1.71 }
5570 dl 1.52 }
5571     }
5572    
5573 dl 1.102 @SuppressWarnings("serial")
5574     static final class MapReduceKeysToLongTask<K,V>
5575     extends BulkTask<K,V,Long> {
5576 dl 1.52 final ObjectToLong<? super K> transformer;
5577     final LongByLongToLong reducer;
5578     final long basis;
5579     long result;
5580 dl 1.61 MapReduceKeysToLongTask<K,V> rights, nextRight;
5581 dl 1.52 MapReduceKeysToLongTask
5582 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5583 dl 1.61 MapReduceKeysToLongTask<K,V> nextRight,
5584 dl 1.52 ObjectToLong<? super K> transformer,
5585     long basis,
5586     LongByLongToLong reducer) {
5587 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5588 dl 1.52 this.transformer = transformer;
5589     this.basis = basis; this.reducer = reducer;
5590     }
5591 dl 1.79 public final Long getRawResult() { return result; }
5592 dl 1.102 public final void compute() {
5593 dl 1.82 final ObjectToLong<? super K> transformer;
5594     final LongByLongToLong reducer;
5595     if ((transformer = this.transformer) != null &&
5596     (reducer = this.reducer) != null) {
5597     long r = this.basis;
5598 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5599     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5600     addToPendingCount(1);
5601 dl 1.82 (rights = new MapReduceKeysToLongTask<K,V>
5602 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5603     rights, transformer, r, reducer)).fork();
5604     }
5605     for (Node<K,V> p; (p = advance()) != null; )
5606     r = reducer.apply(r, transformer.apply(p.key));
5607 dl 1.82 result = r;
5608     CountedCompleter<?> c;
5609     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5610 dl 1.102 @SuppressWarnings("unchecked") MapReduceKeysToLongTask<K,V>
5611 dl 1.82 t = (MapReduceKeysToLongTask<K,V>)c,
5612     s = t.rights;
5613     while (s != null) {
5614     t.result = reducer.apply(t.result, s.result);
5615     s = t.rights = s.nextRight;
5616     }
5617 dl 1.52 }
5618 dl 1.71 }
5619 dl 1.52 }
5620     }
5621    
5622 dl 1.102 @SuppressWarnings("serial")
5623     static final class MapReduceValuesToLongTask<K,V>
5624     extends BulkTask<K,V,Long> {
5625 dl 1.52 final ObjectToLong<? super V> transformer;
5626     final LongByLongToLong reducer;
5627     final long basis;
5628     long result;
5629 dl 1.61 MapReduceValuesToLongTask<K,V> rights, nextRight;
5630 dl 1.52 MapReduceValuesToLongTask
5631 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5632 dl 1.61 MapReduceValuesToLongTask<K,V> nextRight,
5633 dl 1.52 ObjectToLong<? super V> transformer,
5634     long basis,
5635     LongByLongToLong reducer) {
5636 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5637 dl 1.52 this.transformer = transformer;
5638     this.basis = basis; this.reducer = reducer;
5639     }
5640 dl 1.79 public final Long getRawResult() { return result; }
5641 dl 1.102 public final void compute() {
5642 dl 1.82 final ObjectToLong<? super V> transformer;
5643     final LongByLongToLong reducer;
5644     if ((transformer = this.transformer) != null &&
5645     (reducer = this.reducer) != null) {
5646     long r = this.basis;
5647 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5648     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5649     addToPendingCount(1);
5650 dl 1.82 (rights = new MapReduceValuesToLongTask<K,V>
5651 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5652     rights, transformer, r, reducer)).fork();
5653     }
5654     for (Node<K,V> p; (p = advance()) != null; )
5655     r = reducer.apply(r, transformer.apply(p.val));
5656 dl 1.82 result = r;
5657     CountedCompleter<?> c;
5658     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5659 dl 1.102 @SuppressWarnings("unchecked") MapReduceValuesToLongTask<K,V>
5660 dl 1.82 t = (MapReduceValuesToLongTask<K,V>)c,
5661     s = t.rights;
5662     while (s != null) {
5663     t.result = reducer.apply(t.result, s.result);
5664     s = t.rights = s.nextRight;
5665     }
5666 dl 1.52 }
5667 dl 1.71 }
5668 dl 1.52 }
5669     }
5670    
5671 dl 1.102 @SuppressWarnings("serial")
5672     static final class MapReduceEntriesToLongTask<K,V>
5673     extends BulkTask<K,V,Long> {
5674 dl 1.52 final ObjectToLong<Map.Entry<K,V>> transformer;
5675     final LongByLongToLong reducer;
5676     final long basis;
5677     long result;
5678 dl 1.61 MapReduceEntriesToLongTask<K,V> rights, nextRight;
5679 dl 1.52 MapReduceEntriesToLongTask
5680 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5681 dl 1.61 MapReduceEntriesToLongTask<K,V> nextRight,
5682 dl 1.52 ObjectToLong<Map.Entry<K,V>> transformer,
5683     long basis,
5684     LongByLongToLong reducer) {
5685 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5686 dl 1.52 this.transformer = transformer;
5687     this.basis = basis; this.reducer = reducer;
5688     }
5689 dl 1.79 public final Long getRawResult() { return result; }
5690 dl 1.102 public final void compute() {
5691 dl 1.82 final ObjectToLong<Map.Entry<K,V>> transformer;
5692     final LongByLongToLong reducer;
5693     if ((transformer = this.transformer) != null &&
5694     (reducer = this.reducer) != null) {
5695     long r = this.basis;
5696 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5697     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5698     addToPendingCount(1);
5699 dl 1.82 (rights = new MapReduceEntriesToLongTask<K,V>
5700 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5701     rights, transformer, r, reducer)).fork();
5702     }
5703     for (Node<K,V> p; (p = advance()) != null; )
5704     r = reducer.apply(r, transformer.apply(p));
5705 dl 1.82 result = r;
5706     CountedCompleter<?> c;
5707     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5708 dl 1.102 @SuppressWarnings("unchecked") MapReduceEntriesToLongTask<K,V>
5709 dl 1.82 t = (MapReduceEntriesToLongTask<K,V>)c,
5710     s = t.rights;
5711     while (s != null) {
5712     t.result = reducer.apply(t.result, s.result);
5713     s = t.rights = s.nextRight;
5714     }
5715 dl 1.52 }
5716     }
5717     }
5718     }
5719    
5720 dl 1.102 @SuppressWarnings("serial")
5721     static final class MapReduceMappingsToLongTask<K,V>
5722     extends BulkTask<K,V,Long> {
5723 dl 1.52 final ObjectByObjectToLong<? super K, ? super V> transformer;
5724     final LongByLongToLong reducer;
5725     final long basis;
5726     long result;
5727 dl 1.61 MapReduceMappingsToLongTask<K,V> rights, nextRight;
5728 dl 1.52 MapReduceMappingsToLongTask
5729 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5730 dl 1.61 MapReduceMappingsToLongTask<K,V> nextRight,
5731 dl 1.52 ObjectByObjectToLong<? super K, ? super V> transformer,
5732     long basis,
5733     LongByLongToLong reducer) {
5734 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5735 dl 1.52 this.transformer = transformer;
5736     this.basis = basis; this.reducer = reducer;
5737     }
5738 dl 1.79 public final Long getRawResult() { return result; }
5739 dl 1.102 public final void compute() {
5740 dl 1.82 final ObjectByObjectToLong<? super K, ? super V> transformer;
5741     final LongByLongToLong reducer;
5742     if ((transformer = this.transformer) != null &&
5743     (reducer = this.reducer) != null) {
5744     long r = this.basis;
5745 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5746     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5747     addToPendingCount(1);
5748 dl 1.82 (rights = new MapReduceMappingsToLongTask<K,V>
5749 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5750     rights, transformer, r, reducer)).fork();
5751     }
5752     for (Node<K,V> p; (p = advance()) != null; )
5753     r = reducer.apply(r, transformer.apply(p.key, p.val));
5754 dl 1.82 result = r;
5755     CountedCompleter<?> c;
5756     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5757 dl 1.102 @SuppressWarnings("unchecked") MapReduceMappingsToLongTask<K,V>
5758 dl 1.82 t = (MapReduceMappingsToLongTask<K,V>)c,
5759     s = t.rights;
5760     while (s != null) {
5761     t.result = reducer.apply(t.result, s.result);
5762     s = t.rights = s.nextRight;
5763     }
5764 dl 1.52 }
5765 dl 1.71 }
5766 dl 1.52 }
5767     }
5768    
5769 dl 1.102 @SuppressWarnings("serial")
5770     static final class MapReduceKeysToIntTask<K,V>
5771     extends BulkTask<K,V,Integer> {
5772 dl 1.52 final ObjectToInt<? super K> transformer;
5773     final IntByIntToInt reducer;
5774     final int basis;
5775     int result;
5776 dl 1.61 MapReduceKeysToIntTask<K,V> rights, nextRight;
5777 dl 1.52 MapReduceKeysToIntTask
5778 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5779 dl 1.61 MapReduceKeysToIntTask<K,V> nextRight,
5780 dl 1.52 ObjectToInt<? super K> transformer,
5781     int basis,
5782     IntByIntToInt reducer) {
5783 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5784 dl 1.52 this.transformer = transformer;
5785     this.basis = basis; this.reducer = reducer;
5786     }
5787 dl 1.79 public final Integer getRawResult() { return result; }
5788 dl 1.102 public final void compute() {
5789 dl 1.82 final ObjectToInt<? super K> transformer;
5790     final IntByIntToInt reducer;
5791     if ((transformer = this.transformer) != null &&
5792     (reducer = this.reducer) != null) {
5793     int r = this.basis;
5794 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5795     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5796     addToPendingCount(1);
5797 dl 1.82 (rights = new MapReduceKeysToIntTask<K,V>
5798 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5799     rights, transformer, r, reducer)).fork();
5800     }
5801     for (Node<K,V> p; (p = advance()) != null; )
5802     r = reducer.apply(r, transformer.apply(p.key));
5803 dl 1.82 result = r;
5804     CountedCompleter<?> c;
5805     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5806 dl 1.102 @SuppressWarnings("unchecked") MapReduceKeysToIntTask<K,V>
5807 dl 1.82 t = (MapReduceKeysToIntTask<K,V>)c,
5808     s = t.rights;
5809     while (s != null) {
5810     t.result = reducer.apply(t.result, s.result);
5811     s = t.rights = s.nextRight;
5812     }
5813 dl 1.52 }
5814 dl 1.71 }
5815 dl 1.52 }
5816     }
5817    
5818 dl 1.102 @SuppressWarnings("serial")
5819     static final class MapReduceValuesToIntTask<K,V>
5820     extends BulkTask<K,V,Integer> {
5821 dl 1.52 final ObjectToInt<? super V> transformer;
5822     final IntByIntToInt reducer;
5823     final int basis;
5824     int result;
5825 dl 1.61 MapReduceValuesToIntTask<K,V> rights, nextRight;
5826 dl 1.52 MapReduceValuesToIntTask
5827 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5828 dl 1.61 MapReduceValuesToIntTask<K,V> nextRight,
5829 dl 1.52 ObjectToInt<? super V> transformer,
5830     int basis,
5831     IntByIntToInt reducer) {
5832 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5833 dl 1.52 this.transformer = transformer;
5834     this.basis = basis; this.reducer = reducer;
5835     }
5836 dl 1.79 public final Integer getRawResult() { return result; }
5837 dl 1.102 public final void compute() {
5838 dl 1.82 final ObjectToInt<? super V> transformer;
5839     final IntByIntToInt reducer;
5840     if ((transformer = this.transformer) != null &&
5841     (reducer = this.reducer) != null) {
5842     int r = this.basis;
5843 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5844     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5845     addToPendingCount(1);
5846 dl 1.82 (rights = new MapReduceValuesToIntTask<K,V>
5847 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5848     rights, transformer, r, reducer)).fork();
5849     }
5850     for (Node<K,V> p; (p = advance()) != null; )
5851     r = reducer.apply(r, transformer.apply(p.val));
5852 dl 1.82 result = r;
5853     CountedCompleter<?> c;
5854     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5855 dl 1.102 @SuppressWarnings("unchecked") MapReduceValuesToIntTask<K,V>
5856 dl 1.82 t = (MapReduceValuesToIntTask<K,V>)c,
5857     s = t.rights;
5858     while (s != null) {
5859     t.result = reducer.apply(t.result, s.result);
5860     s = t.rights = s.nextRight;
5861     }
5862 dl 1.52 }
5863 dl 1.71 }
5864 dl 1.52 }
5865     }
5866    
5867 dl 1.102 @SuppressWarnings("serial")
5868     static final class MapReduceEntriesToIntTask<K,V>
5869     extends BulkTask<K,V,Integer> {
5870 dl 1.52 final ObjectToInt<Map.Entry<K,V>> transformer;
5871     final IntByIntToInt reducer;
5872     final int basis;
5873     int result;
5874 dl 1.61 MapReduceEntriesToIntTask<K,V> rights, nextRight;
5875 dl 1.52 MapReduceEntriesToIntTask
5876 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5877 dl 1.61 MapReduceEntriesToIntTask<K,V> nextRight,
5878 dl 1.52 ObjectToInt<Map.Entry<K,V>> transformer,
5879     int basis,
5880     IntByIntToInt reducer) {
5881 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5882 dl 1.52 this.transformer = transformer;
5883     this.basis = basis; this.reducer = reducer;
5884     }
5885 dl 1.79 public final Integer getRawResult() { return result; }
5886 dl 1.102 public final void compute() {
5887 dl 1.82 final ObjectToInt<Map.Entry<K,V>> transformer;
5888     final IntByIntToInt reducer;
5889     if ((transformer = this.transformer) != null &&
5890     (reducer = this.reducer) != null) {
5891     int r = this.basis;
5892 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5893     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5894     addToPendingCount(1);
5895 dl 1.82 (rights = new MapReduceEntriesToIntTask<K,V>
5896 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5897     rights, transformer, r, reducer)).fork();
5898     }
5899     for (Node<K,V> p; (p = advance()) != null; )
5900     r = reducer.apply(r, transformer.apply(p));
5901 dl 1.82 result = r;
5902     CountedCompleter<?> c;
5903     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5904 dl 1.102 @SuppressWarnings("unchecked") MapReduceEntriesToIntTask<K,V>
5905 dl 1.82 t = (MapReduceEntriesToIntTask<K,V>)c,
5906     s = t.rights;
5907     while (s != null) {
5908     t.result = reducer.apply(t.result, s.result);
5909     s = t.rights = s.nextRight;
5910     }
5911 dl 1.52 }
5912     }
5913     }
5914     }
5915    
5916 dl 1.102 @SuppressWarnings("serial")
5917     static final class MapReduceMappingsToIntTask<K,V>
5918     extends BulkTask<K,V,Integer> {
5919 dl 1.52 final ObjectByObjectToInt<? super K, ? super V> transformer;
5920     final IntByIntToInt reducer;
5921     final int basis;
5922     int result;
5923 dl 1.61 MapReduceMappingsToIntTask<K,V> rights, nextRight;
5924 dl 1.52 MapReduceMappingsToIntTask
5925 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5926 dl 1.79 MapReduceMappingsToIntTask<K,V> nextRight,
5927 dl 1.52 ObjectByObjectToInt<? super K, ? super V> transformer,
5928     int basis,
5929     IntByIntToInt reducer) {
5930 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5931 dl 1.52 this.transformer = transformer;
5932     this.basis = basis; this.reducer = reducer;
5933     }
5934 dl 1.79 public final Integer getRawResult() { return result; }
5935 dl 1.102 public final void compute() {
5936 dl 1.82 final ObjectByObjectToInt<? super K, ? super V> transformer;
5937     final IntByIntToInt reducer;
5938     if ((transformer = this.transformer) != null &&
5939     (reducer = this.reducer) != null) {
5940     int r = this.basis;
5941 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5942     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5943     addToPendingCount(1);
5944 dl 1.82 (rights = new MapReduceMappingsToIntTask<K,V>
5945 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5946     rights, transformer, r, reducer)).fork();
5947     }
5948     for (Node<K,V> p; (p = advance()) != null; )
5949     r = reducer.apply(r, transformer.apply(p.key, p.val));
5950 dl 1.82 result = r;
5951     CountedCompleter<?> c;
5952     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5953 dl 1.102 @SuppressWarnings("unchecked") MapReduceMappingsToIntTask<K,V>
5954 dl 1.82 t = (MapReduceMappingsToIntTask<K,V>)c,
5955     s = t.rights;
5956     while (s != null) {
5957     t.result = reducer.apply(t.result, s.result);
5958     s = t.rights = s.nextRight;
5959     }
5960 dl 1.52 }
5961     }
5962     }
5963     }
5964    
5965 dl 1.102 /* ---------------- Counters -------------- */
5966    
5967     // Adapted from LongAdder and Striped64.
5968     // See their internal docs for explanation.
5969    
5970     // A padded cell for distributing counts
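         // (p0..p6 and q0..q6 pad the volatile value onto its own cache line,
         // limiting false sharing between cells updated by different threads)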
5971     static final class CounterCell {
5972     volatile long p0, p1, p2, p3, p4, p5, p6;
5973     volatile long value;
5974     volatile long q0, q1, q2, q3, q4, q5, q6;
5975     CounterCell(long x) { value = x; }
5976     }
5977    
5978     /**
5979     * Holder for the thread-local hash code determining which
5980     * CounterCell to use. The code is initialized via the
5981     * counterHashCodeGenerator, but may be moved upon collisions.
5982     */
5983     static final class CounterHashCode {
5984     int code;
5985     }
5986    
5987     /**
5988     * Generates the initial value for per-thread CounterHashCodes.
5989     */
5990     static final AtomicInteger counterHashCodeGenerator = new AtomicInteger();
5991    
5992     /**
5993     * Increment for counterHashCodeGenerator. See class ThreadLocal
5994     * for explanation.
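         * (0x61c88647 is the golden-ratio-derived increment also used by
         * ThreadLocal; it spreads successive seeds nearly evenly across
         * power-of-two-sized cell tables.)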
5995     */
5996     static final int SEED_INCREMENT = 0x61c88647;
5997    
5998     /**
5999     * Per-thread counter hash codes. Shared across all instances.
6000     */
6001     static final ThreadLocal<CounterHashCode> threadCounterHashCode =
6002     new ThreadLocal<CounterHashCode>();
6003    
6004    
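         // Returns baseCount plus the sum of all counter cells; callers get a
         // moving snapshot, so the value is only an estimate while concurrent
         // updates are in flight.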
6005     final long sumCount() {
6006     CounterCell[] as = counterCells; CounterCell a;
6007     long sum = baseCount;
6008     if (as != null) {
6009     for (int i = 0; i < as.length; ++i) {
6010     if ((a = as[i]) != null)
6011     sum += a.value;
6012     }
6013     }
6014     return sum;
6015     }
6016    
6017     // See LongAdder version for explanation
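         // In outline: probe a cell by hash; if the slot is empty, try to
         // attach a new cell under the cellsBusy spinlock; otherwise CAS the
         // cell's value, and on repeated contention either expand the cell
         // table (up to NCPU slots) or rehash this thread's probe code. If the
         // cell table does not exist yet, initialize it; failing that, fall
         // back to CASing baseCount.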
6018     private final void fullAddCount(long x, CounterHashCode hc,
6019     boolean wasUncontended) {
6020     int h;
6021     if (hc == null) {
6022     hc = new CounterHashCode();
6023     int s = counterHashCodeGenerator.addAndGet(SEED_INCREMENT);
6024     h = hc.code = (s == 0) ? 1 : s; // Avoid zero
6025     threadCounterHashCode.set(hc);
6026     }
6027     else
6028     h = hc.code;
6029     boolean collide = false; // True if last slot nonempty
6030     for (;;) {
6031     CounterCell[] as; CounterCell a; int n; long v;
6032     if ((as = counterCells) != null && (n = as.length) > 0) {
6033     if ((a = as[(n - 1) & h]) == null) {
6034     if (cellsBusy == 0) { // Try to attach new Cell
6035     CounterCell r = new CounterCell(x); // Optimistic create
6036     if (cellsBusy == 0 &&
6037     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
6038     boolean created = false;
6039     try { // Recheck under lock
6040     CounterCell[] rs; int m, j;
6041     if ((rs = counterCells) != null &&
6042     (m = rs.length) > 0 &&
6043     rs[j = (m - 1) & h] == null) {
6044     rs[j] = r;
6045     created = true;
6046     }
6047     } finally {
6048     cellsBusy = 0;
6049     }
6050     if (created)
6051     break;
6052     continue; // Slot is now non-empty
6053     }
6054     }
6055     collide = false;
6056     }
6057     else if (!wasUncontended) // CAS already known to fail
6058     wasUncontended = true; // Continue after rehash
6059     else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
6060     break;
6061     else if (counterCells != as || n >= NCPU)
6062     collide = false; // At max size or stale
6063     else if (!collide)
6064     collide = true;
6065     else if (cellsBusy == 0 &&
6066     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
6067     try {
6068     if (counterCells == as) {// Expand table unless stale
6069     CounterCell[] rs = new CounterCell[n << 1];
6070     for (int i = 0; i < n; ++i)
6071     rs[i] = as[i];
6072     counterCells = rs;
6073     }
6074     } finally {
6075     cellsBusy = 0;
6076     }
6077     collide = false;
6078     continue; // Retry with expanded table
6079     }
6080     h ^= h << 13; // Rehash
6081     h ^= h >>> 17;
6082     h ^= h << 5;
6083     }
6084     else if (cellsBusy == 0 && counterCells == as &&
6085     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
6086     boolean init = false;
6087     try { // Initialize table
6088     if (counterCells == as) {
6089     CounterCell[] rs = new CounterCell[2];
6090     rs[h & 1] = new CounterCell(x);
6091     counterCells = rs;
6092     init = true;
6093     }
6094     } finally {
6095     cellsBusy = 0;
6096     }
6097     if (init)
6098     break;
6099     }
6100     else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
6101     break; // Fall back on using base
6102     }
6103     hc.code = h; // Record index for next time
6104     }
6105    
6106 dl 1.52 // Unsafe mechanics
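         // (raw field offsets and Node[] element geometry used for CAS and
         // volatile reads/writes)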
6107 dl 1.82 private static final sun.misc.Unsafe U;
6108     private static final long SIZECTL;
6109     private static final long TRANSFERINDEX;
6110     private static final long TRANSFERORIGIN;
6111     private static final long BASECOUNT;
6112 dl 1.102 private static final long CELLSBUSY;
6113 dl 1.82 private static final long CELLVALUE;
6114 dl 1.52 private static final long ABASE;
6115     private static final int ASHIFT;
6116    
6117     static {
6118     try {
6119 dl 1.82 U = getUnsafe();
6120 dl 1.52 Class<?> k = ConcurrentHashMapV8.class;
6121 dl 1.82 SIZECTL = U.objectFieldOffset
6122 dl 1.52 (k.getDeclaredField("sizeCtl"));
6123 dl 1.82 TRANSFERINDEX = U.objectFieldOffset
6124     (k.getDeclaredField("transferIndex"));
6125     TRANSFERORIGIN = U.objectFieldOffset
6126     (k.getDeclaredField("transferOrigin"));
6127     BASECOUNT = U.objectFieldOffset
6128     (k.getDeclaredField("baseCount"));
6129 dl 1.102 CELLSBUSY = U.objectFieldOffset
6130     (k.getDeclaredField("cellsBusy"));
6131 dl 1.82 Class<?> ck = CounterCell.class;
6132     CELLVALUE = U.objectFieldOffset
6133     (ck.getDeclaredField("value"));
6134 jsr166 1.101 Class<?> ak = Node[].class;
6135     ABASE = U.arrayBaseOffset(ak);
6136     int scale = U.arrayIndexScale(ak);
6137 jsr166 1.89 if ((scale & (scale - 1)) != 0)
6138     throw new Error("data type scale not a power of two");
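         // ABASE + ((long)i << ASHIFT) is then the raw offset of element i of
         // a Node[] table, as used by the tabAt/casTabAt accessors defined
         // earlier in this file.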
6139     ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
6140 dl 1.52 } catch (Exception e) {
6141     throw new Error(e);
6142     }
6143     }
6144    
6145     /**
6146     * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
6147     * Replace with a simple call to Unsafe.getUnsafe when integrating
6148     * into a jdk.
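         * (Outside the bootstrap class path, Unsafe.getUnsafe throws
         * SecurityException; the fallback below reflectively reads the
         * singleton Unsafe field inside a doPrivileged action.)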
6149     *
6150     * @return a sun.misc.Unsafe
6151     */
6152     private static sun.misc.Unsafe getUnsafe() {
6153     try {
6154     return sun.misc.Unsafe.getUnsafe();
6155 jsr166 1.87 } catch (SecurityException tryReflectionInstead) {}
6156     try {
6157     return java.security.AccessController.doPrivileged
6158     (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
6159     public sun.misc.Unsafe run() throws Exception {
6160     Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
6161     for (java.lang.reflect.Field f : k.getDeclaredFields()) {
6162     f.setAccessible(true);
6163     Object x = f.get(null);
6164     if (k.isInstance(x))
6165     return k.cast(x);
6166     }
6167     throw new NoSuchFieldError("the Unsafe");
6168     }});
6169     } catch (java.security.PrivilegedActionException e) {
6170     throw new RuntimeException("Could not initialize intrinsics",
6171     e.getCause());
6172 dl 1.52 }
6173     }
6174 dl 1.1 }