ViewVC Help
View File | Revision Log | Show Annotations | Download File | Root Listing
root/jsr166/jsr166/src/jsr166e/ConcurrentHashMapV8.java
Revision: 1.109
Committed: Wed Jul 3 18:16:31 2013 UTC (10 years, 10 months ago) by dl
Branch: MAIN
Changes since 1.108: +17 -14 lines
Log Message:
More conservative use of volatiles

File Contents

# User Rev Content
1 dl 1.1 /*
2     * Written by Doug Lea with assistance from members of JCP JSR-166
3     * Expert Group and released to the public domain, as explained at
4     * http://creativecommons.org/publicdomain/zero/1.0/
5     */
6    
7     package jsr166e;
8 dl 1.71
9 dl 1.102 import jsr166e.ForkJoinPool;
10    
11     import java.io.ObjectStreamField;
12     import java.io.Serializable;
13     import java.lang.reflect.ParameterizedType;
14     import java.lang.reflect.Type;
15 dl 1.24 import java.util.Arrays;
16 dl 1.1 import java.util.Collection;
17 dl 1.102 import java.util.Comparator;
18     import java.util.ConcurrentModificationException;
19     import java.util.Enumeration;
20     import java.util.HashMap;
21 dl 1.1 import java.util.Hashtable;
22     import java.util.Iterator;
23 dl 1.102 import java.util.Map;
24 dl 1.1 import java.util.NoSuchElementException;
25 dl 1.102 import java.util.Set;
26 dl 1.1 import java.util.concurrent.ConcurrentMap;
27 dl 1.102 import java.util.concurrent.atomic.AtomicReference;
28 dl 1.82 import java.util.concurrent.atomic.AtomicInteger;
29 dl 1.102 import java.util.concurrent.locks.LockSupport;
30     import java.util.concurrent.locks.ReentrantLock;
31 dl 1.79
32 dl 1.1 /**
33     * A hash table supporting full concurrency of retrievals and
34     * high expected concurrency for updates. This class obeys the
35     * same functional specification as {@link java.util.Hashtable}, and
36     * includes versions of methods corresponding to each method of
37     * {@code Hashtable}. However, even though all operations are
38     * thread-safe, retrieval operations do <em>not</em> entail locking,
39     * and there is <em>not</em> any support for locking the entire table
40     * in a way that prevents all access. This class is fully
41     * interoperable with {@code Hashtable} in programs that rely on its
42     * thread safety but not on its synchronization details.
43     *
44 jsr166 1.78 * <p>Retrieval operations (including {@code get}) generally do not
45 dl 1.1 * block, so may overlap with update operations (including {@code put}
46     * and {@code remove}). Retrievals reflect the results of the most
47     * recently <em>completed</em> update operations holding upon their
48 dl 1.59 * onset. (More formally, an update operation for a given key bears a
49     * <em>happens-before</em> relation with any (non-null) retrieval for
50     * that key reporting the updated value.) For aggregate operations
51     * such as {@code putAll} and {@code clear}, concurrent retrievals may
52     * reflect insertion or removal of only some entries. Similarly,
53     * Iterators and Enumerations return elements reflecting the state of
54     * the hash table at some point at or since the creation of the
55     * iterator/enumeration. They do <em>not</em> throw {@link
56     * ConcurrentModificationException}. However, iterators are designed
57     * to be used by only one thread at a time. Bear in mind that the
58     * results of aggregate status methods including {@code size}, {@code
59     * isEmpty}, and {@code containsValue} are typically useful only when
60     * a map is not undergoing concurrent updates in other threads.
61     * Otherwise the results of these methods reflect transient states
62     * that may be adequate for monitoring or estimation purposes, but not
63     * for program control.
64 dl 1.1 *
65 jsr166 1.78 * <p>The table is dynamically expanded when there are too many
66 dl 1.16 * collisions (i.e., keys that have distinct hash codes but fall into
67     * the same slot modulo the table size), with the expected average
68 dl 1.24 * effect of maintaining roughly two bins per mapping (corresponding
69     * to a 0.75 load factor threshold for resizing). There may be much
70     * variance around this average as mappings are added and removed, but
71     * overall, this maintains a commonly accepted time/space tradeoff for
72     * hash tables. However, resizing this or any other kind of hash
73     * table may be a relatively slow operation. When possible, it is a
74     * good idea to provide a size estimate as an optional {@code
75 dl 1.16 * initialCapacity} constructor argument. An additional optional
76     * {@code loadFactor} constructor argument provides a further means of
77     * customizing initial table capacity by specifying the table density
78     * to be used in calculating the amount of space to allocate for the
79     * given number of elements. Also, for compatibility with previous
80     * versions of this class, constructors may optionally specify an
81     * expected {@code concurrencyLevel} as an additional hint for
82     * internal sizing. Note that using many keys with exactly the same
83 jsr166 1.31 * {@code hashCode()} is a sure way to slow down performance of any
84 dl 1.102 * hash table. To ameliorate impact, when keys are {@link Comparable},
85     * this class may use comparison order among keys to help break ties.
86 dl 1.1 *
87 jsr166 1.78 * <p>A {@link Set} projection of a ConcurrentHashMapV8 may be created
88 dl 1.70 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
89     * (using {@link #keySet(Object)}) when only keys are of interest, and the
90     * mapped values are (perhaps transiently) not used or all take the
91     * same mapping value.
92     *
93 dl 1.1 * <p>This class and its views and iterators implement all of the
94     * <em>optional</em> methods of the {@link Map} and {@link Iterator}
95     * interfaces.
96     *
97 jsr166 1.78 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
98 dl 1.1 * does <em>not</em> allow {@code null} to be used as a key or value.
99     *
100 dl 1.102 * <p>ConcurrentHashMapV8s support a set of sequential and parallel bulk
101     * operations that are designed
102     * to be safely, and often sensibly, applied even with maps that are
103     * being concurrently updated by other threads; for example, when
104     * computing a snapshot summary of the values in a shared registry.
105     * There are three kinds of operation, each with four forms, accepting
106     * functions with Keys, Values, Entries, and (Key, Value) arguments
107     * and/or return values. Because the elements of a ConcurrentHashMapV8
108     * are not ordered in any particular way, and may be processed in
109     * different orders in different parallel executions, the correctness
110     * of supplied functions should not depend on any ordering, or on any
111     * other objects or values that may transiently change while
112     * computation is in progress; and except for forEach actions, should
113     * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry}
114     * objects do not support method {@code setValue}.
115 dl 1.70 *
116     * <ul>
117     * <li> forEach: Perform a given action on each element.
118     * A variant form applies a given transformation on each element
119     * before performing the action.</li>
120     *
121     * <li> search: Return the first available non-null result of
122     * applying a given function on each element; skipping further
123     * search when a result is found.</li>
124     *
125     * <li> reduce: Accumulate each element. The supplied reduction
126     * function cannot rely on ordering (more formally, it should be
127     * both associative and commutative). There are five variants:
128     *
129     * <ul>
130     *
131     * <li> Plain reductions. (There is not a form of this method for
132     * (key, value) function arguments since there is no corresponding
133     * return type.)</li>
134     *
135     * <li> Mapped reductions that accumulate the results of a given
136     * function applied to each element.</li>
137     *
138     * <li> Reductions to scalar doubles, longs, and ints, using a
139     * given basis value.</li>
140     *
141 dl 1.102 * </ul>
142 dl 1.70 * </li>
143     * </ul>
144 dl 1.102 *
145     * <p>These bulk operations accept a {@code parallelismThreshold}
146     * argument. Methods proceed sequentially if the current map size is
147     * estimated to be less than the given threshold. Using a value of
148     * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value
149     * of {@code 1} results in maximal parallelism by partitioning into
150     * enough subtasks to fully utilize the {@link
151     * ForkJoinPool#commonPool()} that is used for all parallel
152     * computations. Normally, you would initially choose one of these
153     * extreme values, and then measure performance of using in-between
154     * values that trade off overhead versus throughput.
155 dl 1.70 *
156     * <p>The concurrency properties of bulk operations follow
157     * from those of ConcurrentHashMapV8: Any non-null result returned
158     * from {@code get(key)} and related access methods bears a
159     * happens-before relation with the associated insertion or
160     * update. The result of any bulk operation reflects the
161     * composition of these per-element relations (but is not
162     * necessarily atomic with respect to the map as a whole unless it
163     * is somehow known to be quiescent). Conversely, because keys
164     * and values in the map are never null, null serves as a reliable
165     * atomic indicator of the current lack of any result. To
166     * maintain this property, null serves as an implicit basis for
167     * all non-scalar reduction operations. For the double, long, and
168     * int versions, the basis should be one that, when combined with
169     * any other value, returns that other value (more formally, it
170     * should be the identity element for the reduction). Most common
171     * reductions have these properties; for example, computing a sum
172     * with basis 0 or a minimum with basis MAX_VALUE.
173     *
174     * <p>Search and transformation functions provided as arguments
175     * should similarly return null to indicate the lack of any result
176     * (in which case it is not used). In the case of mapped
177     * reductions, this also enables transformations to serve as
178     * filters, returning null (or, in the case of primitive
179     * specializations, the identity basis) if the element should not
180     * be combined. You can create compound transformations and
181     * filterings by composing them yourself under this "null means
182     * there is nothing there now" rule before using them in search or
183     * reduce operations.
184     *
185     * <p>Methods accepting and/or returning Entry arguments maintain
186     * key-value associations. They may be useful for example when
187     * finding the key for the greatest value. Note that "plain" Entry
188     * arguments can be supplied using {@code new
189     * AbstractMap.SimpleEntry(k,v)}.
190     *
191 jsr166 1.78 * <p>Bulk operations may complete abruptly, throwing an
192 dl 1.70 * exception encountered in the application of a supplied
193     * function. Bear in mind when handling such exceptions that other
194     * concurrently executing functions could also have thrown
195     * exceptions, or would have done so if the first exception had
196     * not occurred.
197     *
198 dl 1.84 * <p>Speedups for parallel compared to sequential forms are common
199     * but not guaranteed. Parallel operations involving brief functions
200     * on small maps may execute more slowly than sequential forms if the
201     * underlying work to parallelize the computation is more expensive
202     * than the computation itself. Similarly, parallelization may not
203     * lead to much actual parallelism if all processors are busy
204     * performing unrelated tasks.
205 dl 1.70 *
206 jsr166 1.78 * <p>All arguments to all task methods must be non-null.
207 dl 1.70 *
208     * <p><em>jsr166e note: During transition, this class
209     * uses nested functional interfaces with different names but the
210 jsr166 1.77 * same forms as those expected for JDK8.</em>
211 dl 1.70 *
212 dl 1.1 * <p>This class is a member of the
213     * <a href="{@docRoot}/../technotes/guides/collections/index.html">
214     * Java Collections Framework</a>.
215     *
216 jsr166 1.22 * @since 1.5
217 dl 1.1 * @author Doug Lea
218     * @param <K> the type of keys maintained by this map
219     * @param <V> the type of mapped values
220     */
221 jsr166 1.99 public class ConcurrentHashMapV8<K,V>
222     implements ConcurrentMap<K,V>, Serializable {
223 dl 1.1 private static final long serialVersionUID = 7249069246763182397L;
224    
225     /**
226 dl 1.102 * An object for traversing and partitioning elements of a source.
227     * This interface provides a subset of the functionality of JDK8
228     * java.util.Spliterator.
229     */
230     public static interface ConcurrentHashMapSpliterator<T> {
231 jsr166 1.103 /**
232 dl 1.102 * If possible, returns a new spliterator covering
233     * approximately one half of the elements, which will not be
234     * covered by this spliterator. Returns null if cannot be
235     * split.
236     */
237     ConcurrentHashMapSpliterator<T> trySplit();
238     /**
239     * Returns an estimate of the number of elements covered by
240     * this Spliterator.
241     */
242     long estimateSize();
243    
244     /** Applies the action to each untraversed element */
245     void forEachRemaining(Action<? super T> action);
246     /** If an element remains, applies the action and returns true. */
247     boolean tryAdvance(Action<? super T> action);
248 dl 1.41 }
249    
250 dl 1.102 // Sams
251     /** Interface describing a void action of one argument */
252     public interface Action<A> { void apply(A a); }
253     /** Interface describing a void action of two arguments */
254     public interface BiAction<A,B> { void apply(A a, B b); }
255     /** Interface describing a function of one argument */
256     public interface Fun<A,T> { T apply(A a); }
257     /** Interface describing a function of two arguments */
258     public interface BiFun<A,B,T> { T apply(A a, B b); }
259     /** Interface describing a function mapping its argument to a double */
260     public interface ObjectToDouble<A> { double apply(A a); }
261     /** Interface describing a function mapping its argument to a long */
262     public interface ObjectToLong<A> { long apply(A a); }
263     /** Interface describing a function mapping its argument to an int */
264     public interface ObjectToInt<A> {int apply(A a); }
265     /** Interface describing a function mapping two arguments to a double */
266     public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
267     /** Interface describing a function mapping two arguments to a long */
268     public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
269     /** Interface describing a function mapping two arguments to an int */
270     public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); }
271     /** Interface describing a function mapping two doubles to a double */
272     public interface DoubleByDoubleToDouble { double apply(double a, double b); }
273     /** Interface describing a function mapping two longs to a long */
274     public interface LongByLongToLong { long apply(long a, long b); }
275     /** Interface describing a function mapping two ints to an int */
276     public interface IntByIntToInt { int apply(int a, int b); }
277    
278 dl 1.1 /*
279     * Overview:
280     *
281     * The primary design goal of this hash table is to maintain
282     * concurrent readability (typically method get(), but also
283     * iterators and related methods) while minimizing update
284 dl 1.24 * contention. Secondary goals are to keep space consumption about
285     * the same or better than java.util.HashMap, and to support high
286     * initial insertion rates on an empty table by many threads.
287 dl 1.1 *
288 dl 1.102 * This map usually acts as a binned (bucketed) hash table. Each
289     * key-value mapping is held in a Node. Most nodes are instances
290     * of the basic Node class with hash, key, value, and next
291     * fields. However, various subclasses exist: TreeNodes are
292     * arranged in balanced trees, not lists. TreeBins hold the roots
293     * of sets of TreeNodes. ForwardingNodes are placed at the heads
294     * of bins during resizing. ReservationNodes are used as
295     * placeholders while establishing values in computeIfAbsent and
296     * related methods. The types TreeBin, ForwardingNode, and
297     * ReservationNode do not hold normal user keys, values, or
298     * hashes, and are readily distinguishable during search etc
299     * because they have negative hash fields and null key and value
300     * fields. (These special nodes are either uncommon or transient,
301     * so the impact of carrying around some unused fields is
302 jsr166 1.108 * insignificant.)
303 dl 1.1 *
304     * The table is lazily initialized to a power-of-two size upon the
305 dl 1.38 * first insertion. Each bin in the table normally contains a
306     * list of Nodes (most often, the list has only zero or one Node).
307     * Table accesses require volatile/atomic reads, writes, and
308     * CASes. Because there is no other way to arrange this without
309     * adding further indirections, we use intrinsics
310 dl 1.102 * (sun.misc.Unsafe) operations.
311 dl 1.24 *
312 dl 1.82 * We use the top (sign) bit of Node hash fields for control
313     * purposes -- it is available anyway because of addressing
314 dl 1.102 * constraints. Nodes with negative hash fields are specially
315     * handled or ignored in map methods.
316 dl 1.14 *
317 dl 1.27 * Insertion (via put or its variants) of the first node in an
318 dl 1.14 * empty bin is performed by just CASing it to the bin. This is
319 dl 1.38 * by far the most common case for put operations under most
320     * key/hash distributions. Other update operations (insert,
321     * delete, and replace) require locks. We do not want to waste
322     * the space required to associate a distinct lock object with
323     * each bin, so instead use the first node of a bin list itself as
324 dl 1.82 * a lock. Locking support for these locks relies on builtin
325     * "synchronized" monitors.
326 dl 1.24 *
327     * Using the first node of a list as a lock does not by itself
328     * suffice though: When a node is locked, any update must first
329     * validate that it is still the first node after locking it, and
330     * retry if not. Because new nodes are always appended to lists,
331     * once a node is first in a bin, it remains first until deleted
332 dl 1.102 * or the bin becomes invalidated (upon resizing).
333 dl 1.14 *
334 dl 1.24 * The main disadvantage of per-bin locks is that other update
335 dl 1.14 * operations on other nodes in a bin list protected by the same
336     * lock can stall, for example when user equals() or mapping
337 dl 1.38 * functions take a long time. However, statistically, under
338     * random hash codes, this is not a common problem. Ideally, the
339     * frequency of nodes in bins follows a Poisson distribution
340 dl 1.14 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
341 dl 1.16 * parameter of about 0.5 on average, given the resizing threshold
342     * of 0.75, although with a large variance because of resizing
343     * granularity. Ignoring variance, the expected occurrences of
344     * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
345 dl 1.38 * first values are:
346 dl 1.16 *
347 dl 1.38 * 0: 0.60653066
348     * 1: 0.30326533
349     * 2: 0.07581633
350     * 3: 0.01263606
351     * 4: 0.00157952
352     * 5: 0.00015795
353     * 6: 0.00001316
354     * 7: 0.00000094
355     * 8: 0.00000006
356     * more: less than 1 in ten million
357 dl 1.16 *
358     * Lock contention probability for two threads accessing distinct
359 dl 1.38 * elements is roughly 1 / (8 * #elements) under random hashes.
360     *
361     * Actual hash code distributions encountered in practice
362     * sometimes deviate significantly from uniform randomness. This
363     * includes the case when N > (1<<30), so some keys MUST collide.
364     * Similarly for dumb or hostile usages in which multiple keys are
365 dl 1.102 * designed to have identical hash codes or ones that differs only
366     * in masked-out high bits. So we use a secondary strategy that
367     * applies when the number of nodes in a bin exceeds a
368     * threshold. These TreeBins use a balanced tree to hold nodes (a
369     * specialized form of red-black trees), bounding search time to
370     * O(log N). Each search step in a TreeBin is at least twice as
371 dl 1.38 * slow as in a regular list, but given that N cannot exceed
372     * (1<<64) (before running out of addresses) this bounds search
373     * steps, lock hold times, etc, to reasonable constants (roughly
374     * 100 nodes inspected per operation worst case) so long as keys
375     * are Comparable (which is very common -- String, Long, etc).
376     * TreeBin nodes (TreeNodes) also maintain the same "next"
377     * traversal pointers as regular nodes, so can be traversed in
378     * iterators in the same way.
379 dl 1.1 *
380 dl 1.38 * The table is resized when occupancy exceeds a percentage
381 dl 1.82 * threshold (nominally, 0.75, but see below). Any thread
382     * noticing an overfull bin may assist in resizing after the
383     * initiating thread allocates and sets up the replacement
384     * array. However, rather than stalling, these other threads may
385     * proceed with insertions etc. The use of TreeBins shields us
386     * from the worst case effects of overfilling while resizes are in
387     * progress. Resizing proceeds by transferring bins, one by one,
388     * from the table to the next table. To enable concurrency, the
389     * next table must be (incrementally) prefilled with place-holders
390     * serving as reverse forwarders to the old table. Because we are
391     * using power-of-two expansion, the elements from each bin must
392     * either stay at same index, or move with a power of two
393     * offset. We eliminate unnecessary node creation by catching
394     * cases where old nodes can be reused because their next fields
395     * won't change. On average, only about one-sixth of them need
396     * cloning when a table doubles. The nodes they replace will be
397     * garbage collectable as soon as they are no longer referenced by
398     * any reader thread that may be in the midst of concurrently
399     * traversing table. Upon transfer, the old table bin contains
400     * only a special forwarding node (with hash field "MOVED") that
401     * contains the next table as its key. On encountering a
402     * forwarding node, access and update operations restart, using
403     * the new table.
404     *
405     * Each bin transfer requires its bin lock, which can stall
406     * waiting for locks while resizing. However, because other
407     * threads can join in and help resize rather than contend for
408     * locks, average aggregate waits become shorter as resizing
409     * progresses. The transfer operation must also ensure that all
410     * accessible bins in both the old and new table are usable by any
411     * traversal. This is arranged by proceeding from the last bin
412     * (table.length - 1) up towards the first. Upon seeing a
413     * forwarding node, traversals (see class Traverser) arrange to
414     * move to the new table without revisiting nodes. However, to
415     * ensure that no intervening nodes are skipped, bin splitting can
416     * only begin after the associated reverse-forwarders are in
417     * place.
418 dl 1.14 *
419 dl 1.24 * The traversal scheme also applies to partial traversals of
420 dl 1.52 * ranges of bins (via an alternate Traverser constructor)
421 dl 1.41 * to support partitioned aggregate operations. Also, read-only
422     * operations give up if ever forwarded to a null table, which
423     * provides support for shutdown-style clearing, which is also not
424     * currently implemented.
425 dl 1.14 *
426     * Lazy table initialization minimizes footprint until first use,
427     * and also avoids resizings when the first operation is from a
428     * putAll, constructor with map argument, or deserialization.
429 dl 1.24 * These cases attempt to override the initial capacity settings,
430     * but harmlessly fail to take effect in cases of races.
431 dl 1.1 *
432 dl 1.82 * The element count is maintained using a specialization of
433     * LongAdder. We need to incorporate a specialization rather than
434     * just use a LongAdder in order to access implicit
435     * contention-sensing that leads to creation of multiple
436     * CounterCells. The counter mechanics avoid contention on
437     * updates but can encounter cache thrashing if read too
438     * frequently during concurrent access. To avoid reading so often,
439     * resizing under contention is attempted only upon adding to a
440     * bin already holding two or more nodes. Under uniform hash
441     * distributions, the probability of this occurring at threshold
442     * is around 13%, meaning that only about 1 in 8 puts check
443 dl 1.102 * threshold (and after resizing, many fewer do so).
444     *
445     * TreeBins use a special form of comparison for search and
446     * related operations (which is the main reason we cannot use
447     * existing collections such as TreeMaps). TreeBins contain
448     * Comparable elements, but may contain others, as well as
449     * elements that are Comparable but not necessarily Comparable
450     * for the same T, so we cannot invoke compareTo among them. To
451     * handle this, the tree is ordered primarily by hash value, then
452     * by Comparable.compareTo order if applicable. On lookup at a
453     * node, if elements are not comparable or compare as 0 then both
454     * left and right children may need to be searched in the case of
455     * tied hash values. (This corresponds to the full list search
456     * that would be necessary if all elements were non-Comparable and
457     * had tied hashes.) The red-black balancing code is updated from
458     * pre-jdk-collections
459     * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
460     * based in turn on Cormen, Leiserson, and Rivest "Introduction to
461     * Algorithms" (CLR).
462     *
463     * TreeBins also require an additional locking mechanism. While
464     * list traversal is always possible by readers even during
465 jsr166 1.108 * updates, tree traversal is not, mainly because of tree-rotations
466 dl 1.102 * that may change the root node and/or its linkages. TreeBins
467     * include a simple read-write lock mechanism parasitic on the
468     * main bin-synchronization strategy: Structural adjustments
469     * associated with an insertion or removal are already bin-locked
470     * (and so cannot conflict with other writers) but must wait for
471     * ongoing readers to finish. Since there can be only one such
472     * waiter, we use a simple scheme using a single "waiter" field to
473     * block writers. However, readers need never block. If the root
474     * lock is held, they proceed along the slow traversal path (via
475     * next-pointers) until the lock becomes available or the list is
476     * exhausted, whichever comes first. These cases are not fast, but
477     * maximize aggregate expected throughput.
478 dl 1.14 *
479     * Maintaining API and serialization compatibility with previous
480     * versions of this class introduces several oddities. Mainly: We
481     * leave untouched but unused constructor arguments referring to
482 dl 1.24 * concurrencyLevel. We accept a loadFactor constructor argument,
483     * but apply it only to initial table capacity (which is the only
484     * time that we can guarantee to honor it.) We also declare an
485     * unused "Segment" class that is instantiated in minimal form
486     * only when serializing.
487 dl 1.102 *
488     * This file is organized to make things a little easier to follow
489     * while reading than they might otherwise: First the main static
490     * declarations and utilities, then fields, then main public
491     * methods (with a few factorings of multiple public methods into
492     * internal ones), then sizing methods, trees, traversers, and
493     * bulk operations.
494 dl 1.1 */
495    
496     /* ---------------- Constants -------------- */
497    
498     /**
499 dl 1.16 * The largest possible table capacity. This value must be
500     * exactly 1<<30 to stay within Java array allocation and indexing
501 dl 1.24 * bounds for power of two table sizes, and is further required
502     * because the top two bits of 32bit hash fields are used for
503     * control purposes.
504 dl 1.1 */
505 dl 1.14 private static final int MAXIMUM_CAPACITY = 1 << 30;
506 dl 1.1
507     /**
508 dl 1.14 * The default initial table capacity. Must be a power of 2
509     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
510 dl 1.1 */
511 dl 1.14 private static final int DEFAULT_CAPACITY = 16;
512 dl 1.1
513     /**
514 dl 1.24 * The largest possible (non-power of two) array size.
515     * Needed by toArray and related methods.
516     */
517     static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
518    
519     /**
520     * The default concurrency level for this table. Unused but
521     * defined for compatibility with previous versions of this class.
522     */
    private static final int DEFAULT_CONCURRENCY_LEVEL = 16; // legacy default; only used when emulating segments in writeObject

    /**
     * The load factor for this table. Overrides of this value in
     * constructors affect only the initial table capacity.  The
     * actual floating point value isn't normally used -- it is
     * simpler to use expressions such as {@code n - (n >>> 2)} for
     * the associated resizing threshold.
     */
    private static final float LOAD_FACTOR = 0.75f;

    /**
     * The bin count threshold for using a tree rather than list for a
     * bin.  Bins are converted to trees when adding an element to a
     * bin with at least this many nodes. The value must be greater
     * than 2, and should be at least 8 to mesh with assumptions in
     * tree removal about conversion back to plain bins upon
     * shrinkage.
     */
    static final int TREEIFY_THRESHOLD = 8;

    /**
     * The bin count threshold for untreeifying a (split) bin during a
     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
     * most 6 to mesh with shrinkage detection under removal.
     */
    static final int UNTREEIFY_THRESHOLD = 6;

    /**
     * The smallest table capacity for which bins may be treeified.
     * (Otherwise the table is resized if too many nodes in a bin.)
     * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
     * conflicts between resizing and treeification thresholds.
     */
    static final int MIN_TREEIFY_CAPACITY = 64;

    /**
     * Minimum number of rebinnings per transfer step. Ranges are
     * subdivided to allow multiple resizer threads.  This value
     * serves as a lower bound to avoid resizers encountering
     * excessive memory contention.  The value should be at least
     * DEFAULT_CAPACITY.
     */
    private static final int MIN_TRANSFER_STRIDE = 16;

    /*
     * Encodings for Node hash fields. See above for explanation.
     * Negative hashes mark special (non-data) nodes; normal nodes
     * always have their sign bit cleared via spread().
     */
    static final int MOVED     = -1; // hash for forwarding nodes
    static final int TREEBIN   = -2; // hash for roots of trees
    static final int RESERVED  = -3; // hash for transient reservations
    static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash

    /** Number of CPUS, to place bounds on some sizings */
    static final int NCPU = Runtime.getRuntime().availableProcessors();

    /** For serialization compatibility. */
    private static final ObjectStreamField[] serialPersistentFields = {
        new ObjectStreamField("segments", Segment[].class),
        new ObjectStreamField("segmentMask", Integer.TYPE),
        new ObjectStreamField("segmentShift", Integer.TYPE)
    };
585    
    /* ---------------- Nodes -------------- */

    /**
     * Key-value entry.  This class is never exported out as a
     * user-mutable Map.Entry (i.e., one supporting setValue; see
     * MapEntry below), but can be used for read-only traversals used
     * in bulk tasks.  Subclasses of Node with a negative hash field
     * are special, and contain null keys and values (but are never
     * exported).  Otherwise, keys and vals are never null.
     */
    static class Node<K,V> implements Map.Entry<K,V> {
        final int hash;           // spread(key.hashCode()); negative only in special subclasses
        final K key;
        volatile V val;           // volatile: value may be overwritten by concurrent updates
        volatile Node<K,V> next;  // volatile: bin lists are mutated concurrently with reads

        Node(int hash, K key, V val, Node<K,V> next) {
            this.hash = hash;
            this.key = key;
            this.val = val;
            this.next = next;
        }

        public final K getKey() { return key; }
        public final V getValue() { return val; }
        public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
        public final String toString(){ return key + "=" + val; }
        // Read-only entry: mutation must go through the enclosing map.
        public final V setValue(V value) {
            throw new UnsupportedOperationException();
        }

        public final boolean equals(Object o) {
            Object k, v, u; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    (k == key || k.equals(key)) &&
                    (v == (u = val) || v.equals(u)));
        }

        /**
         * Virtualized support for map.get(); overridden in subclasses.
         */
        Node<K,V> find(int h, Object k) {
            Node<K,V> e = this;
            if (k != null) {
                do {
                    K ek;
                    if (e.hash == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                } while ((e = e.next) != null);
            }
            return null;
        }
    }
642    
643 dl 1.102 /* ---------------- Static utilities -------------- */
644    
645 dl 1.82 /**
646 dl 1.102 * Spreads (XORs) higher bits of hash to lower and also forces top
647     * bit to 0. Because the table uses power-of-two masking, sets of
648     * hashes that vary only in bits above the current mask will
649     * always collide. (Among known examples are sets of Float keys
650     * holding consecutive whole numbers in small tables.) So we
651     * apply a transform that spreads the impact of higher bits
652     * downward. There is a tradeoff between speed, utility, and
653     * quality of bit-spreading. Because many common sets of hashes
654     * are already reasonably distributed (so don't benefit from
655     * spreading), and because we use trees to handle large sets of
656     * collisions in bins, we just XOR some shifted bits in the
657     * cheapest possible way to reduce systematic lossage, as well as
658     * to incorporate impact of the highest bits that would otherwise
659     * never be used in index calculations because of table bounds.
660 dl 1.82 */
661 dl 1.102 static final int spread(int h) {
662     return (h ^ (h >>> 16)) & HASH_BITS;
663 dl 1.82 }
664    
665     /**
666 dl 1.102 * Returns a power of two table size for the given desired capacity.
667     * See Hackers Delight, sec 3.2
668 dl 1.82 */
669 dl 1.102 private static final int tableSizeFor(int c) {
670     int n = c - 1;
671     n |= n >>> 1;
672     n |= n >>> 2;
673     n |= n >>> 4;
674     n |= n >>> 8;
675     n |= n >>> 16;
676     return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
677     }
678 dl 1.82
679     /**
680 dl 1.102 * Returns x's Class if it is of the form "class C implements
681     * Comparable<C>", else null.
682 dl 1.82 */
683 dl 1.102 static Class<?> comparableClassFor(Object x) {
684     if (x instanceof Comparable) {
685     Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
686     if ((c = x.getClass()) == String.class) // bypass checks
687     return c;
688     if ((ts = c.getGenericInterfaces()) != null) {
689     for (int i = 0; i < ts.length; ++i) {
690     if (((t = ts[i]) instanceof ParameterizedType) &&
691     ((p = (ParameterizedType)t).getRawType() ==
692     Comparable.class) &&
693     (as = p.getActualTypeArguments()) != null &&
694     as.length == 1 && as[0] == c) // type arg is c
695     return c;
696     }
697     }
698     }
699     return null;
700     }
701 dl 1.82
702     /**
703 dl 1.102 * Returns k.compareTo(x) if x matches kc (k's screened comparable
704     * class), else 0.
705 dl 1.82 */
706 dl 1.102 @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
707     static int compareComparables(Class<?> kc, Object k, Object x) {
708     return (x == null || x.getClass() != kc ? 0 :
709     ((Comparable)k).compareTo(x));
710     }
711    
712     /* ---------------- Table element access -------------- */
713    
714     /*
715     * Volatile access methods are used for table elements as well as
716     * elements of in-progress next table while resizing. All uses of
717     * the tab arguments must be null checked by callers. All callers
718     * also paranoically precheck that tab's length is not zero (or an
719     * equivalent check), thus ensuring that any index argument taking
720     * the form of a hash value anded with (length - 1) is a valid
721     * index. Note that, to be correct wrt arbitrary concurrency
722     * errors by users, these checks must operate on local variables,
723     * which accounts for some odd-looking inline assignments below.
724     * Note that calls to setTabAt always occur within locked regions,
725 dl 1.109 * and so in principle require only release ordering, not need
726     * full volatile semantics, but are currently coded as volatile
727     * writes to be conservative.
728 dl 1.102 */
729    
    @SuppressWarnings("unchecked")
    // Volatile read of tab[i]; element address computed manually via Unsafe
    // (ASHIFT/ABASE are the array scale/base offsets).
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
    }
734 dl 1.24
    // Atomically sets tab[i] to v iff it currently holds c (CAS on the array slot).
    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }
739 jsr166 1.103
    // Volatile write of tab[i]. Per the note above, setTabAt is always called
    // within locked regions, so release ordering would suffice in principle;
    // a full volatile write is used to be conservative.
    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
    }
743 jsr166 1.103
    /* ---------------- Fields -------------- */

    /**
     * The array of bins. Lazily initialized upon first insertion.
     * Size is always a power of two. Accessed directly by iterators.
     */
    transient volatile Node<K,V>[] table;

    /**
     * The next table to use; non-null only while resizing.
     */
    private transient volatile Node<K,V>[] nextTable;

    /**
     * Base counter value, used mainly when there is no contention,
     * but also as a fallback during table initialization
     * races. Updated via CAS.
     */
    private transient volatile long baseCount;

    /**
     * Table initialization and resizing control.  When negative, the
     * table is being initialized or resized: -1 for initialization,
     * else -(1 + the number of active resizing threads).  Otherwise,
     * when table is null, holds the initial table size to use upon
     * creation, or 0 for default. After initialization, holds the
     * next element count value upon which to resize the table.
     */
    private transient volatile int sizeCtl;

    /**
     * The next table index (plus one) to split while resizing.
     */
    private transient volatile int transferIndex;

    /**
     * The least available table index to split while resizing.
     */
    private transient volatile int transferOrigin;

    /**
     * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
     */
    private transient volatile int cellsBusy;

    /**
     * Table of counter cells. When non-null, size is a power of 2.
     */
    private transient volatile CounterCell[] counterCells;

    // views, created lazily by keySet(), values() and entrySet()
    private transient KeySetView<K,V> keySet;
    private transient ValuesView<K,V> values;
    private transient EntrySetView<K,V> entrySet;
798 dl 1.24
799 dl 1.16
800 dl 1.102 /* ---------------- Public operations -------------- */
801 dl 1.38
    /**
     * Creates a new, empty map with the default initial table size (16).
     */
    public ConcurrentHashMapV8() {
        // Nothing to do: the table itself is created lazily on first insertion.
    }
807    
    /**
     * Creates a new, empty map with an initial table size
     * accommodating the specified number of elements without the need
     * to dynamically resize.
     *
     * @param initialCapacity The implementation performs internal
     * sizing to accommodate this many elements.
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative
     */
    public ConcurrentHashMapV8(int initialCapacity) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException();
        // Size for roughly 1.5x the requested capacity, rounded up to a
        // power of two, so initialCapacity elements fit without resizing.
        int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
                   MAXIMUM_CAPACITY :
                   tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
        this.sizeCtl = cap; // table is created lazily; sizeCtl holds the size to use
    }
826    
    /**
     * Creates a new map with the same mappings as the given map.
     *
     * @param m the map
     */
    public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
        this.sizeCtl = DEFAULT_CAPACITY; // putAll presizes further from m.size()
        putAll(m);
    }
836    
    /**
     * Creates a new, empty map with an initial table size based on
     * the given number of elements ({@code initialCapacity}) and
     * initial table density ({@code loadFactor}).
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements,
     * given the specified load factor.
     * @param loadFactor the load factor (table density) for
     * establishing the initial table size
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative or the load factor is nonpositive
     *
     * @since 1.6
     */
    public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
        // Delegate with a concurrencyLevel of 1 (a pure sizing hint here).
        this(initialCapacity, loadFactor, 1);
    }
855 dl 1.1
    /**
     * Creates a new, empty map with an initial table size based on
     * the given number of elements ({@code initialCapacity}), table
     * density ({@code loadFactor}), and number of concurrently
     * updating threads ({@code concurrencyLevel}).
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements,
     * given the specified load factor.
     * @param loadFactor the load factor (table density) for
     * establishing the initial table size
     * @param concurrencyLevel the estimated number of concurrently
     * updating threads. The implementation may use this value as
     * a sizing hint.
     * @throws IllegalArgumentException if the initial capacity is
     * negative or the load factor or concurrencyLevel are
     * nonpositive
     */
    public ConcurrentHashMapV8(int initialCapacity,
                               float loadFactor, int concurrencyLevel) {
        if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
            throw new IllegalArgumentException();
        if (initialCapacity < concurrencyLevel)   // Use at least as many bins
            initialCapacity = concurrencyLevel;   // as estimated threads
        // loadFactor affects only this initial sizing; the table itself
        // always resizes at the fixed LOAD_FACTOR threshold.
        long size = (long)(1.0 + (long)initialCapacity / loadFactor);
        int cap = (size >= (long)MAXIMUM_CAPACITY) ?
            MAXIMUM_CAPACITY : tableSizeFor((int)size);
        this.sizeCtl = cap;
    }
885 dl 1.1
886 dl 1.102 // Original (since JDK1.2) Map methods
887 dl 1.38
888     /**
889 dl 1.102 * {@inheritDoc}
890 dl 1.38 */
891 dl 1.102 public int size() {
892     long n = sumCount();
893     return ((n < 0L) ? 0 :
894     (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
895     (int)n);
896 dl 1.38 }
897 dl 1.1
    /**
     * {@inheritDoc}
     */
    public boolean isEmpty() {
        return sumCount() <= 0L; // ignore transient negative values
    }
904    
    /**
     * Returns the value to which the specified key is mapped,
     * or {@code null} if this map contains no mapping for the key.
     *
     * <p>More formally, if this map contains a mapping from a key
     * {@code k} to a value {@code v} such that {@code key.equals(k)},
     * then this method returns {@code v}; otherwise it returns
     * {@code null}.  (There can be at most one such mapping.)
     *
     * @throws NullPointerException if the specified key is null
     */
    public V get(Object key) {
        Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
        int h = spread(key.hashCode());
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (e = tabAt(tab, (n - 1) & h)) != null) {
            if ((eh = e.hash) == h) {
                // Common case: the first node in the bin is the match.
                if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                    return e.val;
            }
            else if (eh < 0)
                // Negative hash: forwarding node or tree bin; delegate
                // to the subclass's find().
                return (p = e.find(h, key)) != null ? p.val : null;
            // Otherwise scan the rest of the linked list.
            while ((e = e.next) != null) {
                if (e.hash == h &&
                    ((ek = e.key) == key || (ek != null && key.equals(ek))))
                    return e.val;
            }
        }
        return null;
    }
935 dl 1.41
    /**
     * Tests if the specified object is a key in this table.
     *
     * @param  key possible key
     * @return {@code true} if and only if the specified object
     *         is a key in this table, as determined by the
     *         {@code equals} method; {@code false} otherwise
     * @throws NullPointerException if the specified key is null
     */
    public boolean containsKey(Object key) {
        // Values are never null, so a null result means "absent".
        return get(key) != null;
    }
948 dl 1.41
    /**
     * Returns {@code true} if this map maps one or more keys to the
     * specified value. Note: This method may require a full traversal
     * of the map, and is much slower than method {@code containsKey}.
     *
     * @param value value whose presence in this map is to be tested
     * @return {@code true} if this map maps one or more keys to the
     *         specified value
     * @throws NullPointerException if the specified value is null
     */
    public boolean containsValue(Object value) {
        if (value == null)
            throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            // Weakly-consistent scan over every bin.
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V v;
                if ((v = p.val) == value || (v != null && value.equals(v)))
                    return true;
            }
        }
        return false;
    }
973 dl 1.38
    /**
     * Maps the specified key to the specified value in this table.
     * Neither the key nor the value can be null.
     *
     * <p>The value can be retrieved by calling the {@code get} method
     * with a key that is equal to the original key.
     *
     * @param key key with which the specified value is to be associated
     * @param value value to be associated with the specified key
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key or value is null
     */
    public V put(K key, V value) {
        return putVal(key, value, false); // false => overwrite existing mapping
    }
990 dl 1.38
    /** Implementation for put and putIfAbsent */
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        if (key == null || value == null) throw new NullPointerException();
        int hash = spread(key.hashCode());
        int binCount = 0;                   // nodes seen in the bin; 0 => no lock was taken
        for (Node<K,V>[] tab = table;;) {   // retry loop: restarts after init or resize help
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();          // lazy table creation on first insertion
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                if (casTabAt(tab, i, null,
                             new Node<K,V>(hash, key, value, null)))
                    break;                   // no lock when adding to empty bin
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);  // table is resizing: help move bins, then retry
            else {
                V oldVal = null;
                synchronized (f) {           // lock the bin via its first node
                    if (tabAt(tab, i) == f) {   // recheck: head may have changed before lock
                        if (fh >= 0) {          // non-negative hash: plain linked list
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    // Not found: append at the tail.
                                    pred.next = new Node<K,V>(hash, key,
                                                              value, null);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) { // red-black tree bin
                            Node<K,V> p;
                            binCount = 2;
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                                  value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);  // long bin: convert to tree (or resize)
                    if (oldVal != null)
                        return oldVal;       // replaced an existing mapping: count unchanged
                    break;
                }
            }
        }
        addCount(1L, binCount);              // new mapping: bump count, maybe trigger resize
        return null;
    }
1055    
    /**
     * Copies all of the mappings from the specified map to this one.
     * These mappings replace any mappings that this map had for any of the
     * keys currently in the specified map.
     *
     * @param m mappings to be stored in this map
     */
    public void putAll(Map<? extends K, ? extends V> m) {
        tryPresize(m.size()); // grow once up front instead of incrementally
        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
            putVal(e.getKey(), e.getValue(), false);
    }
1068    
    /**
     * Removes the key (and its corresponding value) from this map.
     * This method does nothing if the key is not in the map.
     *
     * @param  key the key that needs to be removed
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key is null
     */
    public V remove(Object key) {
        // null value => delete; null cv => unconditional (see replaceNode)
        return replaceNode(key, null, null);
    }
1081    
    /**
     * Implementation for the four public remove/replace methods:
     * Replaces node value with v, conditional upon match of cv if
     * non-null.  If resulting value is null, delete.
     */
    final V replaceNode(Object key, V value, Object cv) {
        int hash = spread(key.hashCode());
        for (Node<K,V>[] tab = table;;) {      // retry loop, as in putVal
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0 ||
                (f = tabAt(tab, i = (n - 1) & hash)) == null)
                break;                         // empty table or bin: nothing to do
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);    // help finish the resize, then retry
            else {
                V oldVal = null;
                boolean validated = false;     // stays false if the bin head changed: retry
                synchronized (f) {
                    if (tabAt(tab, i) == f) {  // recheck head after acquiring the lock
                        if (fh >= 0) {         // plain linked-list bin
                            validated = true;
                            for (Node<K,V> e = f, pred = null;;) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    V ev = e.val;
                                    // cv == null means "match unconditionally"
                                    if (cv == null || cv == ev ||
                                        (ev != null && cv.equals(ev))) {
                                        oldVal = ev;
                                        if (value != null)
                                            e.val = value;            // replace in place
                                        else if (pred != null)
                                            pred.next = e.next;       // unlink interior node
                                        else
                                            setTabAt(tab, i, e.next); // unlink bin head
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        else if (f instanceof TreeBin) { // tree bin
                            validated = true;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(hash, key, null)) != null) {
                                V pv = p.val;
                                if (cv == null || cv == pv ||
                                    (pv != null && cv.equals(pv))) {
                                    oldVal = pv;
                                    if (value != null)
                                        p.val = value;
                                    else if (t.removeTreeNode(p))
                                        // tree shrank below threshold: back to a list bin
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                    }
                }
                if (validated) {
                    if (oldVal != null) {
                        if (value == null)
                            addCount(-1L, -1); // removal: decrement the element count
                        return oldVal;
                    }
                    break;
                }
            }
        }
        return null;
    }
1157    
    /**
     * Removes all of the mappings from this map.
     */
    public void clear() {
        long delta = 0L; // negative number of deletions
        int i = 0;
        Node<K,V>[] tab = table;
        while (tab != null && i < tab.length) {
            int fh;
            Node<K,V> f = tabAt(tab, i);
            if (f == null)
                ++i;                         // bin already empty; move on
            else if ((fh = f.hash) == MOVED) {
                tab = helpTransfer(tab, f);  // table is resizing: help, then
                i = 0;                       // restart
            }
            else {
                synchronized (f) {           // lock the bin before emptying it
                    if (tabAt(tab, i) == f) {
                        // Count discarded nodes; a tree bin is walked
                        // via its 'first' linked list.
                        Node<K,V> p = (fh >= 0 ? f :
                                       (f instanceof TreeBin) ?
                                       ((TreeBin<K,V>)f).first : null);
                        while (p != null) {
                            --delta;
                            p = p.next;
                        }
                        setTabAt(tab, i++, null);
                    }
                }
            }
        }
        if (delta != 0L)
            addCount(delta, -1);
    }
1192    
    /**
     * Returns a {@link Set} view of the keys contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa. The set supports element
     * removal, which removes the corresponding mapping from this map,
     * via the {@code Iterator.remove}, {@code Set.remove},
     * {@code removeAll}, {@code retainAll}, and {@code clear}
     * operations.  It does not support the {@code add} or
     * {@code addAll} operations.
     *
     * <p>The view's {@code iterator} is a "weakly consistent" iterator
     * that will never throw {@link ConcurrentModificationException},
     * and guarantees to traverse elements as they existed upon
     * construction of the iterator, and may (but is not guaranteed to)
     * reflect any modifications subsequent to construction.
     *
     * @return the set view
     */
    public KeySetView<K,V> keySet() {
        // Lazily create and cache the view. Unsynchronized: a race may
        // create an extra instance, which is simply overwritten.
        KeySetView<K,V> ks;
        return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
    }
1215    
    /**
     * Returns a {@link Collection} view of the values contained in this map.
     * The collection is backed by the map, so changes to the map are
     * reflected in the collection, and vice-versa.  The collection
     * supports element removal, which removes the corresponding
     * mapping from this map, via the {@code Iterator.remove},
     * {@code Collection.remove}, {@code removeAll},
     * {@code retainAll}, and {@code clear} operations.  It does not
     * support the {@code add} or {@code addAll} operations.
     *
     * <p>The view's {@code iterator} is a "weakly consistent" iterator
     * that will never throw {@link ConcurrentModificationException},
     * and guarantees to traverse elements as they existed upon
     * construction of the iterator, and may (but is not guaranteed to)
     * reflect any modifications subsequent to construction.
     *
     * @return the collection view
     */
    public Collection<V> values() {
        // Lazily create and cache the view; racy duplicate creation is tolerated.
        ValuesView<K,V> vs;
        return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
    }
1238    
    /**
     * Returns a {@link Set} view of the mappings contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  The set supports element
     * removal, which removes the corresponding mapping from the map,
     * via the {@code Iterator.remove}, {@code Set.remove},
     * {@code removeAll}, {@code retainAll}, and {@code clear}
     * operations.
     *
     * <p>The view's {@code iterator} is a "weakly consistent" iterator
     * that will never throw {@link ConcurrentModificationException},
     * and guarantees to traverse elements as they existed upon
     * construction of the iterator, and may (but is not guaranteed to)
     * reflect any modifications subsequent to construction.
     *
     * @return the set view
     */
    public Set<Map.Entry<K,V>> entrySet() {
        // Lazily create and cache the view; racy duplicate creation is tolerated.
        EntrySetView<K,V> es;
        return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
    }
1260    
    /**
     * Returns the hash code value for this {@link Map}, i.e.,
     * the sum of, for each key-value pair in the map,
     * {@code key.hashCode() ^ value.hashCode()}.
     *
     * @return the hash code value for this map
     */
    public int hashCode() {
        int h = 0;
        Node<K,V>[] t;
        if ((t = table) != null) {
            // Weakly-consistent traversal; concurrent updates may or
            // may not be reflected in the sum.
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; )
                h += p.key.hashCode() ^ p.val.hashCode();
        }
        return h;
    }
1278 dl 1.27
    /**
     * Returns a string representation of this map.  The string
     * representation consists of a list of key-value mappings (in no
     * particular order) enclosed in braces ("{@code {}}").  Adjacent
     * mappings are separated by the characters {@code ", "} (comma
     * and space).  Each key-value mapping is rendered as the key
     * followed by an equals sign ("{@code =}") followed by the
     * associated value.
     *
     * @return a string representation of this map
     */
    public String toString() {
        Node<K,V>[] t;
        int f = (t = table) == null ? 0 : t.length;
        Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
        StringBuilder sb = new StringBuilder();
        sb.append('{');
        Node<K,V> p;
        if ((p = it.advance()) != null) {
            for (;;) {
                K k = p.key;
                V v = p.val;
                // Guard against self-referential maps (map used as its own key/value).
                sb.append(k == this ? "(this Map)" : k);
                sb.append('=');
                sb.append(v == this ? "(this Map)" : v);
                if ((p = it.advance()) == null)
                    break;
                sb.append(',').append(' ');
            }
        }
        return sb.append('}').toString();
    }
1311    
    /**
     * Compares the specified object with this map for equality.
     * Returns {@code true} if the given object is a map with the same
     * mappings as this map.  This operation may return misleading
     * results if either map is concurrently modified during execution
     * of this method.
     *
     * @param o object to be compared for equality with this map
     * @return {@code true} if the specified object is equal to this map
     */
    public boolean equals(Object o) {
        if (o != this) {
            if (!(o instanceof Map))
                return false;
            Map<?,?> m = (Map<?,?>) o;
            Node<K,V>[] t;
            int f = (t = table) == null ? 0 : t.length;
            Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
            // Direction 1: every mapping in this map is present in m.
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V val = p.val;
                Object v = m.get(p.key);
                if (v == null || (v != val && !v.equals(val)))
                    return false;
            }
            // Direction 2: every mapping in m is present in this map.
            for (Map.Entry<?,?> e : m.entrySet()) {
                Object mk, mv, v;
                if ((mk = e.getKey()) == null ||
                    (mv = e.getValue()) == null ||
                    (v = get(mk)) == null ||
                    (mv != v && !mv.equals(v)))
                    return false;
            }
        }
        return true;
    }
1347    
    /**
     * Stripped-down version of helper class used in previous version,
     * declared for the sake of serialization compatibility
     */
    static class Segment<K,V> extends ReentrantLock implements Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        final float loadFactor;
        // Instances are only created while serializing (see writeObject);
        // they are never used for locking in this class.
        Segment(float lf) { this.loadFactor = lf; }
    }
1357    
    /**
     * Saves the state of the {@code ConcurrentHashMapV8} instance to a
     * stream (i.e., serializes it).
     * @param s the stream
     * @serialData
     * the key (Object) and value (Object)
     * for each key-value mapping, followed by a null pair.
     * The key-value mappings are emitted in no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        // For serialization compatibility
        // Emulate segment calculation from previous version of this class
        int sshift = 0;
        int ssize = 1;
        while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
            ++sshift;
            ssize <<= 1;
        }
        int segmentShift = 32 - sshift;
        int segmentMask = ssize - 1;
        @SuppressWarnings("unchecked") Segment<K,V>[] segments = (Segment<K,V>[])
            new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
        for (int i = 0; i < segments.length; ++i)
            segments[i] = new Segment<K,V>(LOAD_FACTOR);
        // Write the legacy fields declared in serialPersistentFields.
        s.putFields().put("segments", segments);
        s.putFields().put("segmentShift", segmentShift);
        s.putFields().put("segmentMask", segmentMask);
        s.writeFields();

        Node<K,V>[] t;
        if ((t = table) != null) {
            // Emit all mappings as alternating key/value objects ...
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                s.writeObject(p.key);
                s.writeObject(p.val);
            }
        }
        // ... terminated by a null key/value pair.
        s.writeObject(null);
        s.writeObject(null);
        segments = null; // throw away
    }
1400    
1401     /**
1402     * Reconstitutes the instance from a stream (that is, deserializes it).
1403     * @param s the stream
1404     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        /*
         * To improve performance in typical cases, we create nodes
         * while reading, then place in table once size is known.
         * However, we must also validate uniqueness and deal with
         * overpopulated bins while doing so, which requires
         * specialized versions of putVal mechanics.
         */
        sizeCtl = -1; // force exclusion for table construction
        s.defaultReadObject();
        long size = 0L;
        Node<K,V> p = null;
        // Read pairs until the null/null sentinel, chaining nodes through
        // their next pointers (p ends up as the head of the chain).
        for (;;) {
            @SuppressWarnings("unchecked") K k = (K) s.readObject();
            @SuppressWarnings("unchecked") V v = (V) s.readObject();
            if (k != null && v != null) {
                p = new Node<K,V>(spread(k.hashCode()), k, v, p);
                ++size;
            }
            else
                break;
        }
        if (size == 0L)
            sizeCtl = 0;
        else {
            // Size the table for the observed element count (1.5x slack,
            // rounded up to a power of two, capped at MAXIMUM_CAPACITY).
            int n;
            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
                n = MAXIMUM_CAPACITY;
            else {
                int sz = (int)size;
                n = tableSizeFor(sz + (sz >>> 1) + 1);
            }
            @SuppressWarnings({"rawtypes","unchecked"})
            Node<K,V>[] tab = (Node<K,V>[])new Node[n];
            int mask = n - 1;
            long added = 0L; // counts unique insertions only
            while (p != null) {
                boolean insertAtFront;
                Node<K,V> next = p.next, first;
                int h = p.hash, j = h & mask;
                if ((first = tabAt(tab, j)) == null)
                    insertAtFront = true;
                else {
                    K k = p.key;
                    if (first.hash < 0) {
                        // Bin already converted to a tree: insert there.
                        TreeBin<K,V> t = (TreeBin<K,V>)first;
                        if (t.putTreeVal(h, k, p.val) == null)
                            ++added;
                        insertAtFront = false;
                    }
                    else {
                        // Scan list bin for a duplicate key; count length.
                        int binCount = 0;
                        insertAtFront = true;
                        Node<K,V> q; K qk;
                        for (q = first; q != null; q = q.next) {
                            if (q.hash == h &&
                                ((qk = q.key) == k ||
                                 (qk != null && k.equals(qk)))) {
                                insertAtFront = false;
                                break;
                            }
                            ++binCount;
                        }
                        if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
                            // Bin is overpopulated: link p in, then convert
                            // the whole chain to a TreeBin.
                            insertAtFront = false;
                            ++added;
                            p.next = first;
                            TreeNode<K,V> hd = null, tl = null;
                            for (q = p; q != null; q = q.next) {
                                TreeNode<K,V> t = new TreeNode<K,V>
                                    (q.hash, q.key, q.val, null, null);
                                if ((t.prev = tl) == null)
                                    hd = t;
                                else
                                    tl.next = t;
                                tl = t;
                            }
                            setTabAt(tab, j, new TreeBin<K,V>(hd));
                        }
                    }
                }
                if (insertAtFront) {
                    ++added;
                    p.next = first;
                    setTabAt(tab, j, p);
                }
                p = next;
            }
            table = tab;
            sizeCtl = n - (n >>> 2); // resize threshold = 0.75 * n
            baseCount = added;
        }
    }
1499    
1500     // ConcurrentMap methods
1501    
1502     /**
1503     * {@inheritDoc}
1504     *
1505     * @return the previous value associated with the specified key,
1506     * or {@code null} if there was no mapping for the key
1507     * @throws NullPointerException if the specified key or value is null
1508     */
1509     public V putIfAbsent(K key, V value) {
1510     return putVal(key, value, true);
1511     }
1512    
1513     /**
1514     * {@inheritDoc}
1515     *
1516     * @throws NullPointerException if the specified key is null
1517     */
1518     public boolean remove(Object key, Object value) {
1519     if (key == null)
1520     throw new NullPointerException();
1521     return value != null && replaceNode(key, null, value) != null;
1522     }
1523    
1524     /**
1525     * {@inheritDoc}
1526     *
1527     * @throws NullPointerException if any of the arguments are null
1528     */
1529     public boolean replace(K key, V oldValue, V newValue) {
1530     if (key == null || oldValue == null || newValue == null)
1531     throw new NullPointerException();
1532     return replaceNode(key, newValue, oldValue) != null;
1533     }
1534    
1535     /**
1536     * {@inheritDoc}
1537     *
1538     * @return the previous value associated with the specified key,
1539     * or {@code null} if there was no mapping for the key
1540     * @throws NullPointerException if the specified key or value is null
1541     */
1542     public V replace(K key, V value) {
1543     if (key == null || value == null)
1544     throw new NullPointerException();
1545     return replaceNode(key, value, null);
1546     }
1547    
1548     // Overrides of JDK8+ Map extension method defaults
1549    
1550     /**
1551     * Returns the value to which the specified key is mapped, or the
1552     * given default value if this map contains no mapping for the
1553     * key.
1554     *
1555     * @param key the key whose associated value is to be returned
1556     * @param defaultValue the value to return if this map contains
1557     * no mapping for the given key
1558     * @return the mapping for the key, if present; else the default value
1559     * @throws NullPointerException if the specified key is null
1560     */
1561     public V getOrDefault(Object key, V defaultValue) {
1562     V v;
1563     return (v = get(key)) == null ? defaultValue : v;
1564     }
1565    
1566     public void forEach(BiAction<? super K, ? super V> action) {
1567     if (action == null) throw new NullPointerException();
1568     Node<K,V>[] t;
1569     if ((t = table) != null) {
1570     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1571     for (Node<K,V> p; (p = it.advance()) != null; ) {
1572     action.apply(p.key, p.val);
1573     }
1574     }
1575     }
1576    
    public void replaceAll(BiFun<? super K, ? super V, ? extends V> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V oldValue = p.val;
                // Retry loop: replaceNode only succeeds if the entry still
                // holds oldValue; on contention, reload the current value
                // and recompute. Stop if the entry was removed (get == null).
                for (K key = p.key;;) {
                    V newValue = function.apply(key, oldValue);
                    if (newValue == null)
                        throw new NullPointerException();
                    if (replaceNode(key, newValue, oldValue) != null ||
                        (oldValue = get(key)) == null)
                        break;
                }
            }
        }
    }
1595    
1596 dl 1.102 /**
1597     * If the specified key is not already associated with a value,
1598     * attempts to compute its value using the given mapping function
1599     * and enters it into this map unless {@code null}. The entire
1600     * method invocation is performed atomically, so the function is
1601     * applied at most once per key. Some attempted update operations
1602     * on this map by other threads may be blocked while computation
1603     * is in progress, so the computation should be short and simple,
1604     * and must not attempt to update any other mappings of this map.
1605     *
1606     * @param key key with which the specified value is to be associated
1607     * @param mappingFunction the function to compute a value
1608     * @return the current (existing or computed) value associated with
1609     * the specified key, or null if the computed value is null
1610     * @throws NullPointerException if the specified key or mappingFunction
1611     * is null
1612     * @throws IllegalStateException if the computation detectably
1613     * attempts a recursive update to this map that would
1614     * otherwise never complete
1615     * @throws RuntimeException or Error if the mappingFunction does so,
1616     * in which case the mapping is left unestablished
1617     */
    public V computeIfAbsent(K key, Fun<? super K, ? extends V> mappingFunction) {
        if (key == null || mappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int binCount = 0; // nodes seen in the bin; drives treeify/addCount
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: reserve the slot with a placeholder node so no
                // other writer can install a mapping while the function runs.
                Node<K,V> r = new ReservationNode<K,V>();
                synchronized (r) {
                    if (casTabAt(tab, i, null, r)) {
                        binCount = 1;
                        Node<K,V> node = null;
                        try {
                            if ((val = mappingFunction.apply(key)) != null)
                                node = new Node<K,V>(h, key, val, null);
                        } finally {
                            // Replace the reservation with the real node,
                            // or clear it (null) if the function failed or
                            // returned null.
                            setTabAt(tab, i, node);
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f); // resize in progress: assist
            else {
                boolean added = false;
                synchronized (f) { // lock the bin head
                    if (tabAt(tab, i) == f) { // recheck under lock
                        if (fh >= 0) {
                            // List bin: search for key; append if absent.
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek; V ev;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = e.val;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    if ((val = mappingFunction.apply(key)) != null) {
                                        added = true;
                                        pred.next = new Node<K,V>(h, key, val, null);
                                    }
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            // Tree bin: search, then insert only if absent.
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(h, key, null)) != null)
                                val = p.val;
                            else if ((val = mappingFunction.apply(key)) != null) {
                                added = true;
                                t.putTreeVal(h, key, val);
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (!added)
                        return val; // existing mapping: no count change
                    break;
                }
            }
        }
        if (val != null)
            addCount(1L, binCount);
        return val;
    }
1698    
1699     /**
1700     * If the value for the specified key is present, attempts to
1701     * compute a new mapping given the key and its current mapped
1702     * value. The entire method invocation is performed atomically.
1703     * Some attempted update operations on this map by other threads
1704     * may be blocked while computation is in progress, so the
1705     * computation should be short and simple, and must not attempt to
1706     * update any other mappings of this map.
1707     *
1708     * @param key key with which a value may be associated
1709     * @param remappingFunction the function to compute a value
1710     * @return the new value associated with the specified key, or null if none
1711     * @throws NullPointerException if the specified key or remappingFunction
1712     * is null
1713     * @throws IllegalStateException if the computation detectably
1714     * attempts a recursive update to this map that would
1715     * otherwise never complete
1716     * @throws RuntimeException or Error if the remappingFunction does so,
1717     * in which case the mapping is unchanged
1718     */
    public V computeIfPresent(K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;    // size change: -1 on removal, 0 otherwise
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
                break; // key absent: nothing to compute
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f); // resize in progress: assist
            else {
                synchronized (f) { // lock the bin head
                    if (tabAt(tab, i) == f) { // recheck under lock
                        if (fh >= 0) {
                            // List bin: find key; remap or unlink it.
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(key, e.val);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        // Function returned null: remove node.
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        else if (f instanceof TreeBin) {
                            // Tree bin: find node; remap or delete it,
                            // untreeifying if the tree becomes too small.
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(h, key, null)) != null) {
                                val = remappingFunction.apply(key, p.val);
                                if (val != null)
                                    p.val = val;
                                else {
                                    delta = -1;
                                    if (t.removeTreeNode(p))
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
1788    
1789 dl 1.102 /**
1790     * Attempts to compute a mapping for the specified key and its
1791     * current mapped value (or {@code null} if there is no current
1792     * mapping). The entire method invocation is performed atomically.
1793     * Some attempted update operations on this map by other threads
1794     * may be blocked while computation is in progress, so the
1795     * computation should be short and simple, and must not attempt to
1796     * update any other mappings of this Map.
1797     *
1798     * @param key key with which the specified value is to be associated
1799     * @param remappingFunction the function to compute a value
1800     * @return the new value associated with the specified key, or null if none
1801     * @throws NullPointerException if the specified key or remappingFunction
1802     * is null
1803     * @throws IllegalStateException if the computation detectably
1804     * attempts a recursive update to this map that would
1805     * otherwise never complete
1806     * @throws RuntimeException or Error if the remappingFunction does so,
1807     * in which case the mapping is unchanged
1808     */
    public V compute(K key,
                     BiFun<? super K, ? super V, ? extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;    // size change: +1 insert, -1 remove, 0 replace
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: reserve the slot while the function runs so no
                // other writer can race in a mapping for this bin.
                Node<K,V> r = new ReservationNode<K,V>();
                synchronized (r) {
                    if (casTabAt(tab, i, null, r)) {
                        binCount = 1;
                        Node<K,V> node = null;
                        try {
                            if ((val = remappingFunction.apply(key, null)) != null) {
                                delta = 1;
                                node = new Node<K,V>(h, key, val, null);
                            }
                        } finally {
                            // Install the real node, or clear the
                            // reservation if no mapping results.
                            setTabAt(tab, i, node);
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f); // resize in progress: assist
            else {
                synchronized (f) { // lock the bin head
                    if (tabAt(tab, i) == f) { // recheck under lock
                        if (fh >= 0) {
                            // List bin: remap existing node, unlink it on a
                            // null result, or append when key is absent.
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(key, e.val);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    val = remappingFunction.apply(key, null);
                                    if (val != null) {
                                        delta = 1;
                                        pred.next =
                                            new Node<K,V>(h, key, val, null);
                                    }
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            // Tree bin: apply function to current value (or
                            // null), then insert/replace/remove accordingly.
                            binCount = 1;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null)
                                p = r.findTreeNode(h, key, null);
                            else
                                p = null;
                            V pv = (p == null) ? null : p.val;
                            val = remappingFunction.apply(key, pv);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
1914    
1915 dl 1.102 /**
1916     * If the specified key is not already associated with a
1917     * (non-null) value, associates it with the given value.
1918     * Otherwise, replaces the value with the results of the given
1919     * remapping function, or removes if {@code null}. The entire
1920     * method invocation is performed atomically. Some attempted
1921     * update operations on this map by other threads may be blocked
1922     * while computation is in progress, so the computation should be
1923     * short and simple, and must not attempt to update any other
1924     * mappings of this Map.
1925     *
1926     * @param key key with which the specified value is to be associated
1927     * @param value the value to use if absent
1928     * @param remappingFunction the function to recompute a value if present
1929     * @return the new value associated with the specified key, or null if none
1930     * @throws NullPointerException if the specified key or the
1931     * remappingFunction is null
1932     * @throws RuntimeException or Error if the remappingFunction does so,
1933     * in which case the mapping is unchanged
1934     */
    public V merge(K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
        if (key == null || value == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;    // size change: +1 insert, -1 remove, 0 replace
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: the function is not consulted; install the
                // given value directly via CAS (retry loop on failure).
                if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) {
                    delta = 1;
                    val = value;
                    break;
                }
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f); // resize in progress: assist
            else {
                synchronized (f) { // lock the bin head
                    if (tabAt(tab, i) == f) { // recheck under lock
                        if (fh >= 0) {
                            // List bin: merge into existing node, unlink it
                            // on a null result, or append when absent.
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(e.val, value);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    delta = 1;
                                    val = value;
                                    pred.next =
                                        new Node<K,V>(h, key, val, null);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            // Tree bin: same merge logic on the tree node.
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r = t.root;
                            TreeNode<K,V> p = (r == null) ? null :
                                r.findTreeNode(h, key, null);
                            val = (p == null) ? value :
                                remappingFunction.apply(p.val, value);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
2023    
2024 dl 1.102 // Hashtable legacy methods
2025    
2026 dl 1.82 /**
2027 dl 1.102 * Legacy method testing if some key maps into the specified value
2028     * in this table. This method is identical in functionality to
2029     * {@link #containsValue(Object)}, and exists solely to ensure
2030     * full compatibility with class {@link java.util.Hashtable},
2031     * which supported this method prior to introduction of the
2032     * Java Collections framework.
2033     *
2034     * @param value a value to search for
2035     * @return {@code true} if and only if some key maps to the
2036     * {@code value} argument in this table as
2037     * determined by the {@code equals} method;
2038     * {@code false} otherwise
2039     * @throws NullPointerException if the specified value is null
2040     */
    // Identical to containsValue; retained only for Hashtable compatibility.
    @Deprecated public boolean contains(Object value) {
        return containsValue(value);
    }
2044    
2045     /**
2046     * Returns an enumeration of the keys in this table.
2047     *
2048     * @return an enumeration of the keys in this table
2049     * @see #keySet()
2050     */
2051     public Enumeration<K> keys() {
2052     Node<K,V>[] t;
2053     int f = (t = table) == null ? 0 : t.length;
2054     return new KeyIterator<K,V>(t, f, 0, f, this);
2055     }
2056    
2057     /**
2058     * Returns an enumeration of the values in this table.
2059     *
2060     * @return an enumeration of the values in this table
2061     * @see #values()
2062     */
2063     public Enumeration<V> elements() {
2064     Node<K,V>[] t;
2065     int f = (t = table) == null ? 0 : t.length;
2066     return new ValueIterator<K,V>(t, f, 0, f, this);
2067     }
2068    
2069     // ConcurrentHashMapV8-only methods
2070    
2071     /**
2072     * Returns the number of mappings. This method should be used
2073     * instead of {@link #size} because a ConcurrentHashMapV8 may
2074     * contain more mappings than can be represented as an int. The
2075     * value returned is an estimate; the actual count may differ if
2076     * there are concurrent insertions or removals.
2077     *
2078     * @return the number of mappings
2079     * @since 1.8
2080     */
2081     public long mappingCount() {
2082     long n = sumCount();
2083     return (n < 0L) ? 0L : n; // ignore transient negative values
2084     }
2085    
2086     /**
2087     * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2088     * from the given type to {@code Boolean.TRUE}.
2089     *
2090     * @return the new set
2091     * @since 1.8
2092     */
2093     public static <K> KeySetView<K,Boolean> newKeySet() {
2094     return new KeySetView<K,Boolean>
2095     (new ConcurrentHashMapV8<K,Boolean>(), Boolean.TRUE);
2096     }
2097    
2098     /**
2099     * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2100     * from the given type to {@code Boolean.TRUE}.
2101     *
2102     * @param initialCapacity The implementation performs internal
2103     * sizing to accommodate this many elements.
2104     * @throws IllegalArgumentException if the initial capacity of
2105     * elements is negative
2106     * @return the new set
2107     * @since 1.8
2108     */
2109     public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2110     return new KeySetView<K,Boolean>
2111     (new ConcurrentHashMapV8<K,Boolean>(initialCapacity), Boolean.TRUE);
2112     }
2113    
2114     /**
2115     * Returns a {@link Set} view of the keys in this map, using the
2116     * given common mapped value for any additions (i.e., {@link
2117     * Collection#add} and {@link Collection#addAll(Collection)}).
2118     * This is of course only appropriate if it is acceptable to use
2119     * the same value for all additions from this view.
2120     *
2121     * @param mappedValue the mapped value to use for any additions
2122     * @return the set view
2123     * @throws NullPointerException if the mappedValue is null
2124 dl 1.82 */
2125 dl 1.102 public KeySetView<K,V> keySet(V mappedValue) {
2126     if (mappedValue == null)
2127     throw new NullPointerException();
2128     return new KeySetView<K,V>(this, mappedValue);
2129     }
2130    
2131     /* ---------------- Special Nodes -------------- */
2132    
2133     /**
2134     * A node inserted at head of bins during transfer operations.
2135     */
    static final class ForwardingNode<K,V> extends Node<K,V> {
        final Node<K,V>[] nextTable; // table being transferred into
        ForwardingNode(Node<K,V>[] tab) {
            // hash == MOVED marks this bin as already transferred.
            super(MOVED, null, null, null);
            this.nextTable = tab;
        }

        // Lookups that land on a forwarded bin continue in nextTable.
        Node<K,V> find(int h, Object k) {
            Node<K,V> e; int n;
            Node<K,V>[] tab = nextTable;
            if (k != null && tab != null && (n = tab.length) > 0 &&
                (e = tabAt(tab, (n - 1) & h)) != null) {
                do {
                    int eh; K ek;
                    if ((eh = e.hash) == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                    // Negative hash: special node (e.g. another forwarder
                    // or a TreeBin); delegate to its own find.
                    if (eh < 0)
                        return e.find(h, k);
                } while ((e = e.next) != null);
            }
            return null;
        }
    }
2160    
2161 dl 1.24 /**
2162 dl 1.102 * A place-holder node used in computeIfAbsent and compute
2163 dl 1.24 */
    static final class ReservationNode<K,V> extends Node<K,V> {
        ReservationNode() {
            // hash == RESERVED marks a bin claimed while a compute /
            // computeIfAbsent function is running; carries no mapping.
            super(RESERVED, null, null, null);
        }

        // A reservation holds no entries, so lookups never match.
        Node<K,V> find(int h, Object k) {
            return null;
        }
    }
2173    
2174 dl 1.102 /* ---------------- Table Initialization and Resizing -------------- */
2175    
2176 dl 1.24 /**
2177     * Initializes table, using the size recorded in sizeCtl.
2178     */
    private final Node<K,V>[] initTable() {
        Node<K,V>[] tab; int sc;
        while ((tab = table) == null || tab.length == 0) {
            // sizeCtl < 0 means another thread holds the init/resize lock.
            if ((sc = sizeCtl) < 0)
                Thread.yield(); // lost initialization race; just spin
            else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                try {
                    if ((tab = table) == null || tab.length == 0) {
                        // A positive prior sizeCtl is the requested
                        // initial capacity.
                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                        @SuppressWarnings({"rawtypes","unchecked"})
                        Node<K,V>[] nt = (Node<K,V>[])new Node[n];
                        table = tab = nt;
                        sc = n - (n >>> 2); // threshold = 0.75 * n
                    }
                } finally {
                    sizeCtl = sc; // publish threshold / release lock
                }
                break;
            }
        }
        return tab;
    }
2201    
2202     /**
2203 dl 1.82 * Adds to count, and if table is too small and not already
2204     * resizing, initiates transfer. If already resizing, helps
2205     * perform transfer if work is available. Rechecks occupancy
2206     * after a transfer to see if another resize is already needed
2207     * because resizings are lagging additions.
2208     *
2209     * @param x the count to add
2210     * @param check if <0, don't check resize, if <= 1 only check if uncontended
2211     */
    private final void addCount(long x, int check) {
        CounterCell[] as; long b, s;
        // Fast path: CAS the base count; fall into the striped-cell path
        // if cells exist or the base CAS is contended.
        if ((as = counterCells) != null ||
            !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
            CounterHashCode hc; CounterCell a; long v; int m;
            boolean uncontended = true;
            if ((hc = threadCounterHashCode.get()) == null ||
                as == null || (m = as.length - 1) < 0 ||
                (a = as[m & hc.code]) == null ||
                !(uncontended =
                  U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
                // Per-thread cell missing or contended: take the slow path.
                fullAddCount(x, hc, uncontended);
                return;
            }
            if (check <= 1)
                return; // caller asked to skip the resize check
            s = sumCount();
        }
        if (check >= 0) {
            Node<K,V>[] tab, nt; int sc;
            // Trigger (or help) a transfer while occupancy exceeds the
            // sizeCtl threshold and the table can still grow.
            while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
                   tab.length < MAXIMUM_CAPACITY) {
                if (sc < 0) {
                    // Resize already running: join it unless it is
                    // finishing or no work remains.
                    if (sc == -1 || transferIndex <= transferOrigin ||
                        (nt = nextTable) == null)
                        break;
                    if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
                        transfer(tab, nt);
                }
                else if (U.compareAndSwapInt(this, SIZECTL, sc, -2))
                    transfer(tab, null); // become the initiating thread
                s = sumCount(); // recheck: resizes may lag additions
            }
        }
    }
2247    
2248 dl 1.27 /**
2249 dl 1.102 * Helps transfer if a resize is in progress.
2250     */
    final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
        Node<K,V>[] nextTab; int sc;
        if ((f instanceof ForwardingNode) &&
            (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
            // Join the in-progress transfer only if it is still the
            // current one and unclaimed work remains; bumping sizeCtl
            // (negative) registers this thread as a helper.
            if (nextTab == nextTable && tab == table &&
                transferIndex > transferOrigin && (sc = sizeCtl) < -1 &&
                U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
                transfer(tab, nextTab);
            return nextTab; // caller retries against the new table
        }
        return table;
    }
2263    
2264     /**
2265 dl 1.27 * Tries to presize table to accommodate the given number of elements.
2266     *
2267     * @param size number of elements (doesn't need to be perfectly accurate)
2268     */
    private final void tryPresize(int size) {
        // Target capacity: 1.5x requested size rounded up to a power of
        // two, capped at MAXIMUM_CAPACITY.
        int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
            tableSizeFor(size + (size >>> 1) + 1);
        int sc;
        while ((sc = sizeCtl) >= 0) { // stop if an init/resize is running
            Node<K,V>[] tab = table; int n;
            if (tab == null || (n = tab.length) == 0) {
                // Table not yet created: build it directly at the larger
                // of the pending sizeCtl hint and the computed target.
                n = (sc > c) ? sc : c;
                if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                    try {
                        if (table == tab) {
                            @SuppressWarnings({"rawtypes","unchecked"})
                            Node<K,V>[] nt = (Node<K,V>[])new Node[n];
                            table = nt;
                            sc = n - (n >>> 2); // threshold = 0.75 * n
                        }
                    } finally {
                        sizeCtl = sc;
                    }
                }
            }
            else if (c <= sc || n >= MAXIMUM_CAPACITY)
                break; // already big enough, or cannot grow further
            else if (tab == table &&
                     U.compareAndSwapInt(this, SIZECTL, sc, -2))
                transfer(tab, null); // initiate a resize
        }
    }
2297    
    /**
     * Moves and/or copies the nodes in each bin to new table. See
     * above for explanation.
     *
     * @param tab the current table
     * @param nextTab the next table, or null if this call should
     *        allocate and install it (the initiating thread)
     */
    private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
        int n = tab.length, stride;
        // Each participating thread claims "stride" consecutive bins
        // at a time, bounded below to limit contention on the indices.
        if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
            stride = MIN_TRANSFER_STRIDE; // subdivide range
        if (nextTab == null) {            // initiating
            try {
                @SuppressWarnings({"rawtypes","unchecked"})
                Node<K,V>[] nt = (Node<K,V>[])new Node[n << 1];
                nextTab = nt;
            } catch (Throwable ex) {      // try to cope with OOME
                sizeCtl = Integer.MAX_VALUE;
                return;
            }
            nextTable = nextTab;
            transferOrigin = n;
            transferIndex = n;
            // Pre-fill the new table, stride by stride from the top
            // down, with forwarding nodes pointing back at the old
            // table; lowering transferOrigin publishes each stride.
            ForwardingNode<K,V> rev = new ForwardingNode<K,V>(tab);
            for (int k = n; k > 0;) {     // progressively reveal ready slots
                int nextk = (k > stride) ? k - stride : 0;
                for (int m = nextk; m < k; ++m)
                    nextTab[m] = rev;
                for (int m = n + nextk; m < n + k; ++m)
                    nextTab[m] = rev;
                U.putOrderedInt(this, TRANSFERORIGIN, k = nextk);
            }
        }
        int nextn = nextTab.length;
        ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
        boolean advance = true;
        for (int i = 0, bound = 0;;) {
            int nextIndex, nextBound, fh; Node<K,V> f;
            // Claim the next bin (or next stride of bins), working
            // downward from transferIndex via CAS.
            while (advance) {
                if (--i >= bound)
                    advance = false;
                else if ((nextIndex = transferIndex) <= transferOrigin) {
                    i = -1;               // no unclaimed work remains
                    advance = false;
                }
                else if (U.compareAndSwapInt
                         (this, TRANSFERINDEX, nextIndex,
                          nextBound = (nextIndex > stride ?
                                       nextIndex - stride : 0))) {
                    bound = nextBound;
                    i = nextIndex - 1;
                    advance = false;
                }
            }
            if (i < 0 || i >= n || i + n >= nextn) {
                // Finished our portion: decrement the resizer count in
                // sizeCtl; the last thread out (sc was -1) commits the
                // new table and sets the next threshold (0.75 * 2n).
                for (int sc;;) {
                    if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) {
                        if (sc == -1) {
                            nextTable = null;
                            table = nextTab;
                            sizeCtl = (n << 1) - (n >>> 1);
                        }
                        return;
                    }
                }
            }
            else if ((f = tabAt(tab, i)) == null) {
                // Empty bin: claim it by CASing in the forwarding node.
                if (casTabAt(tab, i, null, fwd)) {
                    setTabAt(nextTab, i, null);
                    setTabAt(nextTab, i + n, null);
                    advance = true;
                }
            }
            else if ((fh = f.hash) == MOVED)
                advance = true; // already processed
            else {
                synchronized (f) {        // lock bin head while splitting
                    if (tabAt(tab, i) == f) {
                        Node<K,V> ln, hn; // low (slot i) and high (i + n) lists
                        if (fh >= 0) {    // plain linked bin
                            // Find the trailing run of nodes that all map
                            // to the same half so it can be reused as-is.
                            int runBit = fh & n;
                            Node<K,V> lastRun = f;
                            for (Node<K,V> p = f.next; p != null; p = p.next) {
                                int b = p.hash & n;
                                if (b != runBit) {
                                    runBit = b;
                                    lastRun = p;
                                }
                            }
                            if (runBit == 0) {
                                ln = lastRun;
                                hn = null;
                            }
                            else {
                                hn = lastRun;
                                ln = null;
                            }
                            // Clone the prefix (before lastRun) onto the
                            // appropriate list; order may reverse.
                            for (Node<K,V> p = f; p != lastRun; p = p.next) {
                                int ph = p.hash; K pk = p.key; V pv = p.val;
                                if ((ph & n) == 0)
                                    ln = new Node<K,V>(ph, pk, pv, ln);
                                else
                                    hn = new Node<K,V>(ph, pk, pv, hn);
                            }
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);
                            advance = true;
                        }
                        else if (f instanceof TreeBin) {
                            // Split tree bin into low/high TreeNode lists,
                            // untreeifying a half that becomes small.
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> lo = null, loTail = null;
                            TreeNode<K,V> hi = null, hiTail = null;
                            int lc = 0, hc = 0;
                            for (Node<K,V> e = t.first; e != null; e = e.next) {
                                int h = e.hash;
                                TreeNode<K,V> p = new TreeNode<K,V>
                                    (h, e.key, e.val, null, null);
                                if ((h & n) == 0) {
                                    if ((p.prev = loTail) == null)
                                        lo = p;
                                    else
                                        loTail.next = p;
                                    loTail = p;
                                    ++lc;
                                }
                                else {
                                    if ((p.prev = hiTail) == null)
                                        hi = p;
                                    else
                                        hiTail.next = p;
                                    hiTail = p;
                                    ++hc;
                                }
                            }
                            // Reuse the whole bin (t) if one side is empty.
                            ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                                (hc != 0) ? new TreeBin<K,V>(lo) : t;
                            hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                                (lc != 0) ? new TreeBin<K,V>(hi) : t;
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);
                            advance = true;
                        }
                    }
                }
            }
        }
    }
2444    
2445 dl 1.102 /* ---------------- Conversion from/to TreeBins -------------- */
2446 dl 1.82
    /**
     * Replaces all linked nodes in bin at given index unless table is
     * too small, in which case resizes instead.
     */
    private final void treeifyBin(Node<K,V>[] tab, int index) {
        Node<K,V> b; int n, sc;
        if (tab != null) {
            if ((n = tab.length) < MIN_TREEIFY_CAPACITY) {
                // Table too small to justify trees: grow it instead.
                if (tab == table && (sc = sizeCtl) >= 0 &&
                    U.compareAndSwapInt(this, SIZECTL, sc, -2))
                    transfer(tab, null);
            }
            else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
                synchronized (b) {        // lock bin head during conversion
                    if (tabAt(tab, index) == b) {   // recheck under lock
                        // Copy the list into TreeNodes, preserving
                        // traversal order via next/prev links, then
                        // install a TreeBin head for the bin.
                        TreeNode<K,V> hd = null, tl = null;
                        for (Node<K,V> e = b; e != null; e = e.next) {
                            TreeNode<K,V> p =
                                new TreeNode<K,V>(e.hash, e.key, e.val,
                                                  null, null);
                            if ((p.prev = tl) == null)
                                hd = p;
                            else
                                tl.next = p;
                            tl = p;
                        }
                        setTabAt(tab, index, new TreeBin<K,V>(hd));
                    }
                }
            }
        }
    }
2479    
2480 dl 1.102 /**
2481 jsr166 1.105 * Returns a list on non-TreeNodes replacing those in given list.
2482 dl 1.102 */
2483     static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2484     Node<K,V> hd = null, tl = null;
2485     for (Node<K,V> q = b; q != null; q = q.next) {
2486     Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
2487     if (tl == null)
2488     hd = p;
2489     else
2490     tl.next = p;
2491     tl = p;
2492     }
2493     return hd;
2494     }
2495    
2496     /* ---------------- TreeNodes -------------- */
2497 dl 1.14
    /**
     * Nodes for use in TreeBins. Extends Node with red-black tree
     * links plus a prev pointer so a node can be unlinked from the
     * "next" list upon deletion.
     */
    static final class TreeNode<K,V> extends Node<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;

        TreeNode(int hash, K key, V val, Node<K,V> next,
                 TreeNode<K,V> parent) {
            super(hash, key, val, next);
            this.parent = parent;
        }

        Node<K,V> find(int h, Object k) {
            return findTreeNode(h, k, null);
        }

        /**
         * Returns the TreeNode (or null if not found) for the given key
         * starting at given root.
         *
         * @param h hash of the key
         * @param k the key
         * @param kc cached comparable class of k, or null if not yet computed
         */
        final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
            if (k != null) {
                TreeNode<K,V> p = this;
                do {
                    int ph, dir; K pk; TreeNode<K,V> q;
                    TreeNode<K,V> pl = p.left, pr = p.right;
                    if ((ph = p.hash) > h)           // branch on hash first
                        p = pl;
                    else if (ph < h)
                        p = pr;
                    else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                        return p;                    // exact match
                    else if (pl == null && pr == null)
                        break;
                    // Equal hashes, unequal keys: direct by Comparable
                    // order if available; otherwise search the right
                    // subtree recursively and continue down the left.
                    else if ((kc != null ||
                              (kc = comparableClassFor(k)) != null) &&
                             (dir = compareComparables(kc, k, pk)) != 0)
                        p = (dir < 0) ? pl : pr;
                    else if (pl == null)
                        p = pr;
                    else if (pr == null ||
                             (q = pr.findTreeNode(h, k, kc)) == null)
                        p = pl;
                    else
                        return q;
                } while (p != null);
            }
            return null;
        }
    }
2552    
2553     /* ---------------- TreeBins -------------- */
2554    
2555     /**
2556     * TreeNodes used at the heads of bins. TreeBins do not hold user
2557     * keys or values, but instead point to list of TreeNodes and
2558     * their root. They also maintain a parasitic read-write lock
2559     * forcing writers (who hold bin lock) to wait for readers (who do
2560     * not) to complete before tree restructuring operations.
2561     */
2562     static final class TreeBin<K,V> extends Node<K,V> {
2563     TreeNode<K,V> root;
2564     volatile TreeNode<K,V> first;
2565     volatile Thread waiter;
2566     volatile int lockState;
2567     // values for lockState
2568     static final int WRITER = 1; // set while holding write lock
2569     static final int WAITER = 2; // set when waiting for write lock
2570     static final int READER = 4; // increment value for setting read lock
2571    
2572     /**
2573     * Creates bin with initial set of nodes headed by b.
2574     */
2575     TreeBin(TreeNode<K,V> b) {
2576     super(TREEBIN, null, null, null);
2577     this.first = b;
2578     TreeNode<K,V> r = null;
2579     for (TreeNode<K,V> x = b, next; x != null; x = next) {
2580     next = (TreeNode<K,V>)x.next;
2581     x.left = x.right = null;
2582     if (r == null) {
2583     x.parent = null;
2584     x.red = false;
2585     r = x;
2586     }
2587     else {
2588     Object key = x.key;
2589     int hash = x.hash;
2590     Class<?> kc = null;
2591     for (TreeNode<K,V> p = r;;) {
2592     int dir, ph;
2593     if ((ph = p.hash) > hash)
2594     dir = -1;
2595     else if (ph < hash)
2596     dir = 1;
2597     else if ((kc != null ||
2598     (kc = comparableClassFor(key)) != null))
2599     dir = compareComparables(kc, key, p.key);
2600     else
2601     dir = 0;
2602     TreeNode<K,V> xp = p;
2603     if ((p = (dir <= 0) ? p.left : p.right) == null) {
2604     x.parent = xp;
2605     if (dir <= 0)
2606     xp.left = x;
2607     else
2608     xp.right = x;
2609     r = balanceInsertion(r, x);
2610     break;
2611     }
2612     }
2613     }
2614     }
2615     this.root = r;
2616     }
2617    
2618     /**
2619 jsr166 1.105 * Acquires write lock for tree restructuring.
2620 dl 1.102 */
2621     private final void lockRoot() {
2622     if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
2623     contendedLock(); // offload to separate method
2624     }
2625    
2626     /**
2627 jsr166 1.105 * Releases write lock for tree restructuring.
2628 dl 1.102 */
2629     private final void unlockRoot() {
2630     lockState = 0;
2631     }
2632 dl 1.14
2633 dl 1.102 /**
2634 jsr166 1.105 * Possibly blocks awaiting root lock.
2635 dl 1.102 */
2636     private final void contendedLock() {
2637     boolean waiting = false;
2638     for (int s;;) {
2639     if (((s = lockState) & WRITER) == 0) {
2640     if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
2641     if (waiting)
2642     waiter = null;
2643     return;
2644     }
2645     }
2646     else if ((s | WAITER) == 0) {
2647     if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
2648     waiting = true;
2649     waiter = Thread.currentThread();
2650     }
2651     }
2652     else if (waiting)
2653     LockSupport.park(this);
2654     }
2655 dl 1.14 }
2656    
2657 dl 1.102 /**
2658     * Returns matching node or null if none. Tries to search
2659 jsr166 1.108 * using tree comparisons from root, but continues linear
2660 dl 1.102 * search when lock not available.
2661     */
2662     final Node<K,V> find(int h, Object k) {
2663     if (k != null) {
2664     for (Node<K,V> e = first; e != null; e = e.next) {
2665     int s; K ek;
2666     if (((s = lockState) & (WAITER|WRITER)) != 0) {
2667     if (e.hash == h &&
2668     ((ek = e.key) == k || (ek != null && k.equals(ek))))
2669     return e;
2670     }
2671     else if (U.compareAndSwapInt(this, LOCKSTATE, s,
2672     s + READER)) {
2673     TreeNode<K,V> r, p;
2674     try {
2675     p = ((r = root) == null ? null :
2676     r.findTreeNode(h, k, null));
2677     } finally {
2678     Thread w;
2679     int ls;
2680     do {} while (!U.compareAndSwapInt
2681     (this, LOCKSTATE,
2682     ls = lockState, ls - READER));
2683     if (ls == (READER|WAITER) && (w = waiter) != null)
2684     LockSupport.unpark(w);
2685     }
2686     return p;
2687     }
2688     }
2689 dl 1.79 }
2690 dl 1.102 return null;
2691 dl 1.41 }
2692    
2693     /**
2694 dl 1.102 * Finds or adds a node.
2695     * @return null if added
2696 dl 1.41 */
2697 dl 1.102 final TreeNode<K,V> putTreeVal(int h, K k, V v) {
2698     Class<?> kc = null;
2699     for (TreeNode<K,V> p = root;;) {
2700     int dir, ph; K pk; TreeNode<K,V> q, pr;
2701     if (p == null) {
2702     first = root = new TreeNode<K,V>(h, k, v, null, null);
2703     break;
2704     }
2705     else if ((ph = p.hash) > h)
2706     dir = -1;
2707     else if (ph < h)
2708     dir = 1;
2709     else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2710     return p;
2711     else if ((kc == null &&
2712     (kc = comparableClassFor(k)) == null) ||
2713     (dir = compareComparables(kc, k, pk)) == 0) {
2714     if (p.left == null)
2715     dir = 1;
2716     else if ((pr = p.right) == null ||
2717     (q = pr.findTreeNode(h, k, kc)) == null)
2718     dir = -1;
2719     else
2720     return q;
2721     }
2722     TreeNode<K,V> xp = p;
2723     if ((p = (dir < 0) ? p.left : p.right) == null) {
2724     TreeNode<K,V> x, f = first;
2725     first = x = new TreeNode<K,V>(h, k, v, f, xp);
2726     if (f != null)
2727     f.prev = x;
2728     if (dir < 0)
2729     xp.left = x;
2730 dl 1.63 else
2731 dl 1.102 xp.right = x;
2732     if (!xp.red)
2733     x.red = true;
2734     else {
2735     lockRoot();
2736     try {
2737     root = balanceInsertion(root, x);
2738     } finally {
2739     unlockRoot();
2740 dl 1.38 }
2741 dl 1.102 }
2742     break;
2743 dl 1.1 }
2744 dl 1.102 }
2745     assert checkInvariants(root);
2746     return null;
2747 dl 1.1 }
2748 dl 1.41
2749 dl 1.102 /**
2750     * Removes the given node, that must be present before this
2751     * call. This is messier than typical red-black deletion code
2752     * because we cannot swap the contents of an interior node
2753     * with a leaf successor that is pinned by "next" pointers
2754     * that are accessible independently of lock. So instead we
2755     * swap the tree linkages.
2756     *
2757 jsr166 1.106 * @return true if now too small, so should be untreeified
2758 dl 1.102 */
2759     final boolean removeTreeNode(TreeNode<K,V> p) {
2760     TreeNode<K,V> next = (TreeNode<K,V>)p.next;
2761     TreeNode<K,V> pred = p.prev; // unlink traversal pointers
2762     TreeNode<K,V> r, rl;
2763     if (pred == null)
2764     first = next;
2765     else
2766     pred.next = next;
2767     if (next != null)
2768     next.prev = pred;
2769     if (first == null) {
2770     root = null;
2771     return true;
2772     }
2773     if ((r = root) == null || r.right == null || // too small
2774     (rl = r.left) == null || rl.left == null)
2775     return true;
2776     lockRoot();
2777     try {
2778     TreeNode<K,V> replacement;
2779     TreeNode<K,V> pl = p.left;
2780     TreeNode<K,V> pr = p.right;
2781     if (pl != null && pr != null) {
2782     TreeNode<K,V> s = pr, sl;
2783     while ((sl = s.left) != null) // find successor
2784     s = sl;
2785     boolean c = s.red; s.red = p.red; p.red = c; // swap colors
2786     TreeNode<K,V> sr = s.right;
2787     TreeNode<K,V> pp = p.parent;
2788     if (s == pr) { // p was s's direct parent
2789     p.parent = s;
2790     s.right = p;
2791     }
2792     else {
2793     TreeNode<K,V> sp = s.parent;
2794     if ((p.parent = sp) != null) {
2795     if (s == sp.left)
2796     sp.left = p;
2797     else
2798     sp.right = p;
2799     }
2800     if ((s.right = pr) != null)
2801     pr.parent = s;
2802     }
2803     p.left = null;
2804     if ((p.right = sr) != null)
2805     sr.parent = p;
2806     if ((s.left = pl) != null)
2807     pl.parent = s;
2808     if ((s.parent = pp) == null)
2809     r = s;
2810     else if (p == pp.left)
2811     pp.left = s;
2812     else
2813     pp.right = s;
2814     if (sr != null)
2815     replacement = sr;
2816     else
2817     replacement = p;
2818     }
2819     else if (pl != null)
2820     replacement = pl;
2821     else if (pr != null)
2822     replacement = pr;
2823     else
2824     replacement = p;
2825     if (replacement != p) {
2826     TreeNode<K,V> pp = replacement.parent = p.parent;
2827     if (pp == null)
2828     r = replacement;
2829     else if (p == pp.left)
2830     pp.left = replacement;
2831     else
2832     pp.right = replacement;
2833     p.left = p.right = p.parent = null;
2834     }
2835    
2836     root = (p.red) ? r : balanceDeletion(r, replacement);
2837    
2838     if (p == replacement) { // detach pointers
2839     TreeNode<K,V> pp;
2840     if ((pp = p.parent) != null) {
2841     if (p == pp.left)
2842     pp.left = null;
2843     else if (p == pp.right)
2844     pp.right = null;
2845     p.parent = null;
2846     }
2847     }
2848     } finally {
2849     unlockRoot();
2850     }
2851     assert checkInvariants(root);
2852     return false;
2853 dl 1.41 }
2854    
2855 dl 1.102 /* ------------------------------------------------------------ */
2856     // Red-black tree methods, all adapted from CLR
2857    
2858     static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
2859     TreeNode<K,V> p) {
2860     TreeNode<K,V> r, pp, rl;
2861     if (p != null && (r = p.right) != null) {
2862     if ((rl = p.right = r.left) != null)
2863     rl.parent = p;
2864     if ((pp = r.parent = p.parent) == null)
2865     (root = r).red = false;
2866     else if (pp.left == p)
2867     pp.left = r;
2868     else
2869     pp.right = r;
2870     r.left = p;
2871     p.parent = r;
2872     }
2873     return root;
2874 dl 1.41 }
2875    
2876 dl 1.102 static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
2877     TreeNode<K,V> p) {
2878     TreeNode<K,V> l, pp, lr;
2879     if (p != null && (l = p.left) != null) {
2880     if ((lr = p.left = l.right) != null)
2881     lr.parent = p;
2882     if ((pp = l.parent = p.parent) == null)
2883     (root = l).red = false;
2884     else if (pp.right == p)
2885     pp.right = l;
2886     else
2887     pp.left = l;
2888     l.right = p;
2889     p.parent = l;
2890     }
2891     return root;
2892     }
2893 dl 1.79
2894 dl 1.102 static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
2895     TreeNode<K,V> x) {
2896     x.red = true;
2897     for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
2898     if ((xp = x.parent) == null) {
2899     x.red = false;
2900     return x;
2901     }
2902     else if (!xp.red || (xpp = xp.parent) == null)
2903     return root;
2904     if (xp == (xppl = xpp.left)) {
2905     if ((xppr = xpp.right) != null && xppr.red) {
2906     xppr.red = false;
2907     xp.red = false;
2908     xpp.red = true;
2909     x = xpp;
2910     }
2911     else {
2912     if (x == xp.right) {
2913     root = rotateLeft(root, x = xp);
2914     xpp = (xp = x.parent) == null ? null : xp.parent;
2915     }
2916     if (xp != null) {
2917     xp.red = false;
2918     if (xpp != null) {
2919     xpp.red = true;
2920     root = rotateRight(root, xpp);
2921     }
2922     }
2923     }
2924     }
2925     else {
2926     if (xppl != null && xppl.red) {
2927     xppl.red = false;
2928     xp.red = false;
2929     xpp.red = true;
2930     x = xpp;
2931     }
2932     else {
2933     if (x == xp.left) {
2934     root = rotateRight(root, x = xp);
2935     xpp = (xp = x.parent) == null ? null : xp.parent;
2936     }
2937     if (xp != null) {
2938     xp.red = false;
2939     if (xpp != null) {
2940     xpp.red = true;
2941     root = rotateLeft(root, xpp);
2942     }
2943     }
2944     }
2945     }
2946     }
2947     }
2948 dl 1.79
2949 dl 1.102 static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
2950     TreeNode<K,V> x) {
2951     for (TreeNode<K,V> xp, xpl, xpr;;) {
2952     if (x == null || x == root)
2953     return root;
2954     else if ((xp = x.parent) == null) {
2955     x.red = false;
2956     return x;
2957     }
2958     else if (x.red) {
2959     x.red = false;
2960     return root;
2961     }
2962     else if ((xpl = xp.left) == x) {
2963     if ((xpr = xp.right) != null && xpr.red) {
2964     xpr.red = false;
2965     xp.red = true;
2966     root = rotateLeft(root, xp);
2967     xpr = (xp = x.parent) == null ? null : xp.right;
2968     }
2969     if (xpr == null)
2970     x = xp;
2971     else {
2972     TreeNode<K,V> sl = xpr.left, sr = xpr.right;
2973     if ((sr == null || !sr.red) &&
2974     (sl == null || !sl.red)) {
2975     xpr.red = true;
2976     x = xp;
2977     }
2978     else {
2979     if (sr == null || !sr.red) {
2980     if (sl != null)
2981     sl.red = false;
2982     xpr.red = true;
2983     root = rotateRight(root, xpr);
2984     xpr = (xp = x.parent) == null ?
2985     null : xp.right;
2986     }
2987     if (xpr != null) {
2988     xpr.red = (xp == null) ? false : xp.red;
2989     if ((sr = xpr.right) != null)
2990     sr.red = false;
2991     }
2992     if (xp != null) {
2993     xp.red = false;
2994     root = rotateLeft(root, xp);
2995     }
2996     x = root;
2997     }
2998     }
2999     }
3000     else { // symmetric
3001     if (xpl != null && xpl.red) {
3002     xpl.red = false;
3003     xp.red = true;
3004     root = rotateRight(root, xp);
3005     xpl = (xp = x.parent) == null ? null : xp.left;
3006     }
3007     if (xpl == null)
3008     x = xp;
3009     else {
3010     TreeNode<K,V> sl = xpl.left, sr = xpl.right;
3011     if ((sl == null || !sl.red) &&
3012     (sr == null || !sr.red)) {
3013     xpl.red = true;
3014     x = xp;
3015     }
3016     else {
3017     if (sl == null || !sl.red) {
3018     if (sr != null)
3019     sr.red = false;
3020     xpl.red = true;
3021     root = rotateLeft(root, xpl);
3022     xpl = (xp = x.parent) == null ?
3023     null : xp.left;
3024     }
3025     if (xpl != null) {
3026     xpl.red = (xp == null) ? false : xp.red;
3027     if ((sl = xpl.left) != null)
3028     sl.red = false;
3029     }
3030     if (xp != null) {
3031     xp.red = false;
3032     root = rotateRight(root, xp);
3033     }
3034     x = root;
3035     }
3036     }
3037     }
3038     }
3039     }
3040 jsr166 1.103
3041 dl 1.79 /**
3042 dl 1.102 * Recursive invariant check
3043 dl 1.79 */
3044 dl 1.102 static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
3045     TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
3046     tb = t.prev, tn = (TreeNode<K,V>)t.next;
3047     if (tb != null && tb.next != t)
3048     return false;
3049     if (tn != null && tn.prev != t)
3050     return false;
3051     if (tp != null && t != tp.left && t != tp.right)
3052     return false;
3053     if (tl != null && (tl.parent != t || tl.hash > t.hash))
3054     return false;
3055     if (tr != null && (tr.parent != t || tr.hash < t.hash))
3056     return false;
3057     if (t.red && tl != null && tl.red && tr != null && tr.red)
3058     return false;
3059     if (tl != null && !checkInvariants(tl))
3060     return false;
3061     if (tr != null && !checkInvariants(tr))
3062     return false;
3063     return true;
3064 dl 1.79 }
3065    
3066 dl 1.102 private static final sun.misc.Unsafe U;
3067     private static final long LOCKSTATE;
3068     static {
3069     try {
3070     U = getUnsafe();
3071     Class<?> k = TreeBin.class;
3072     LOCKSTATE = U.objectFieldOffset
3073     (k.getDeclaredField("lockState"));
3074     } catch (Exception e) {
3075     throw new Error(e);
3076     }
3077     }
3078 dl 1.1 }
3079    
3080 dl 1.102 /* ----------------Table Traversal -------------- */
3081 dl 1.1
    /**
     * Encapsulates traversal for methods such as containsValue; also
     * serves as a base class for other iterators and spliterators.
     *
     * Method advance visits once each still-valid node that was
     * reachable upon iterator construction. It might miss some that
     * were added to a bin after the bin was visited, which is OK wrt
     * consistency guarantees. Maintaining this property in the face
     * of possible ongoing resizes requires a fair amount of
     * bookkeeping state that is difficult to optimize away amidst
     * volatile accesses. Even so, traversal maintains reasonable
     * throughput.
     *
     * Normally, iteration proceeds bin-by-bin traversing lists.
     * However, if the table has been resized, then all future steps
     * must traverse both the bin at the current index as well as at
     * (index + baseSize); and so on for further resizings. To
     * paranoically cope with potential sharing by users of iterators
     * across threads, iteration terminates if a bounds checks fails
     * for a table read.
     */
    static class Traverser<K,V> {
        Node<K,V>[] tab;        // current table; updated if resized
        Node<K,V> next;         // the next entry to use
        int index;              // index of bin to use next
        int baseIndex;          // current index of initial table
        int baseLimit;          // index bound for initial table
        final int baseSize;     // initial table size

        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
            this.tab = tab;
            this.baseSize = size;
            this.baseIndex = this.index = index;
            this.baseLimit = limit;
            this.next = null;
        }

        /**
         * Advances if possible, returning next valid node, or null if none.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;
            for (;;) {
                Node<K,V>[] t; int i, n; K ek; // must use locals in checks
                if (e != null)
                    return next = e;           // next node within current bin
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;        // exhausted (or stale bounds)
                if ((e = tabAt(t, index)) != null && e.hash < 0) {
                    // Special bin head: follow forwarded tables during
                    // a resize; enter the node list of a tree bin.
                    if (e instanceof ForwardingNode) {
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first;
                    else
                        e = null;
                }
                if ((index += baseSize) >= n)
                    index = ++baseIndex; // visit upper slots if present
            }
        }
    }
3149    
3150     /**
3151 dl 1.102 * Base of key, value, and entry Iterators. Adds fields to
3152 jsr166 1.105 * Traverser to support iterator.remove.
3153 dl 1.75 */
3154 dl 1.102 static class BaseIterator<K,V> extends Traverser<K,V> {
3155     final ConcurrentHashMapV8<K,V> map;
3156     Node<K,V> lastReturned;
3157     BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
3158     ConcurrentHashMapV8<K,V> map) {
3159     super(tab, size, index, limit);
3160     this.map = map;
3161     advance();
3162     }
3163    
3164     public final boolean hasNext() { return next != null; }
3165     public final boolean hasMoreElements() { return next != null; }
3166    
3167     public final void remove() {
3168     Node<K,V> p;
3169     if ((p = lastReturned) == null)
3170     throw new IllegalStateException();
3171     lastReturned = null;
3172     map.replaceNode(p.key, null, null);
3173     }
3174 dl 1.75 }
3175    
3176 dl 1.102 static final class KeyIterator<K,V> extends BaseIterator<K,V>
3177     implements Iterator<K>, Enumeration<K> {
3178     KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
3179     ConcurrentHashMapV8<K,V> map) {
3180     super(tab, index, size, limit, map);
3181     }
3182    
3183     public final K next() {
3184     Node<K,V> p;
3185     if ((p = next) == null)
3186     throw new NoSuchElementException();
3187     K k = p.key;
3188     lastReturned = p;
3189     advance();
3190     return k;
3191     }
3192    
3193     public final K nextElement() { return next(); }
3194 dl 1.75 }
3195    
3196 dl 1.102 static final class ValueIterator<K,V> extends BaseIterator<K,V>
3197     implements Iterator<V>, Enumeration<V> {
3198     ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
3199     ConcurrentHashMapV8<K,V> map) {
3200     super(tab, index, size, limit, map);
3201     }
3202    
3203     public final V next() {
3204     Node<K,V> p;
3205     if ((p = next) == null)
3206     throw new NoSuchElementException();
3207     V v = p.val;
3208     lastReturned = p;
3209     advance();
3210     return v;
3211 dl 1.84 }
3212 dl 1.102
3213     public final V nextElement() { return next(); }
3214 dl 1.75 }
3215    
3216 dl 1.102 static final class EntryIterator<K,V> extends BaseIterator<K,V>
3217     implements Iterator<Map.Entry<K,V>> {
3218     EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
3219     ConcurrentHashMapV8<K,V> map) {
3220     super(tab, index, size, limit, map);
3221     }
3222    
3223     public final Map.Entry<K,V> next() {
3224     Node<K,V> p;
3225     if ((p = next) == null)
3226     throw new NoSuchElementException();
3227     K k = p.key;
3228     V v = p.val;
3229     lastReturned = p;
3230     advance();
3231     return new MapEntry<K,V>(k, v, map);
3232 dl 1.84 }
3233 dl 1.75 }
3234    
3235     /**
3236 dl 1.102 * Exported Entry for EntryIterator
3237 dl 1.75 */
3238 dl 1.102 static final class MapEntry<K,V> implements Map.Entry<K,V> {
3239     final K key; // non-null
3240     V val; // non-null
3241     final ConcurrentHashMapV8<K,V> map;
3242     MapEntry(K key, V val, ConcurrentHashMapV8<K,V> map) {
3243     this.key = key;
3244     this.val = val;
3245     this.map = map;
3246     }
3247     public K getKey() { return key; }
3248     public V getValue() { return val; }
3249     public int hashCode() { return key.hashCode() ^ val.hashCode(); }
3250     public String toString() { return key + "=" + val; }
3251    
3252     public boolean equals(Object o) {
3253     Object k, v; Map.Entry<?,?> e;
3254     return ((o instanceof Map.Entry) &&
3255     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3256     (v = e.getValue()) != null &&
3257     (k == key || k.equals(key)) &&
3258     (v == val || v.equals(val)));
3259     }
3260    
3261     /**
3262     * Sets our entry's value and writes through to the map. The
3263     * value to return is somewhat arbitrary here. Since we do not
3264     * necessarily track asynchronous changes, the most recent
3265     * "previous" value could be different from what we return (or
3266     * could even have been removed, in which case the put will
3267     * re-establish). We do not and cannot guarantee more.
3268     */
3269     public V setValue(V value) {
3270     if (value == null) throw new NullPointerException();
3271     V v = val;
3272     val = value;
3273     map.put(key, value);
3274     return v;
3275 dl 1.84 }
3276 dl 1.75 }
3277    
3278 dl 1.102 static final class KeySpliterator<K,V> extends Traverser<K,V>
3279     implements ConcurrentHashMapSpliterator<K> {
3280     long est; // size estimate
3281     KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3282     long est) {
3283     super(tab, size, index, limit);
3284     this.est = est;
3285     }
3286    
3287     public ConcurrentHashMapSpliterator<K> trySplit() {
3288     int i, f, h;
3289     return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3290     new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
3291     f, est >>>= 1);
3292     }
3293    
3294     public void forEachRemaining(Action<? super K> action) {
3295     if (action == null) throw new NullPointerException();
3296     for (Node<K,V> p; (p = advance()) != null;)
3297     action.apply(p.key);
3298     }
3299    
3300     public boolean tryAdvance(Action<? super K> action) {
3301     if (action == null) throw new NullPointerException();
3302     Node<K,V> p;
3303     if ((p = advance()) == null)
3304     return false;
3305     action.apply(p.key);
3306     return true;
3307 dl 1.84 }
3308 dl 1.102
3309     public long estimateSize() { return est; }
3310    
3311 dl 1.75 }
3312    
3313 dl 1.102 static final class ValueSpliterator<K,V> extends Traverser<K,V>
3314     implements ConcurrentHashMapSpliterator<V> {
3315     long est; // size estimate
3316     ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
3317     long est) {
3318     super(tab, size, index, limit);
3319     this.est = est;
3320     }
3321    
3322     public ConcurrentHashMapSpliterator<V> trySplit() {
3323     int i, f, h;
3324     return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3325     new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
3326     f, est >>>= 1);
3327     }
3328    
3329     public void forEachRemaining(Action<? super V> action) {
3330     if (action == null) throw new NullPointerException();
3331     for (Node<K,V> p; (p = advance()) != null;)
3332     action.apply(p.val);
3333     }
3334    
3335     public boolean tryAdvance(Action<? super V> action) {
3336     if (action == null) throw new NullPointerException();
3337     Node<K,V> p;
3338     if ((p = advance()) == null)
3339     return false;
3340     action.apply(p.val);
3341     return true;
3342     }
3343    
3344     public long estimateSize() { return est; }
3345    
3346 dl 1.75 }
3347    
3348 dl 1.102 static final class EntrySpliterator<K,V> extends Traverser<K,V>
3349     implements ConcurrentHashMapSpliterator<Map.Entry<K,V>> {
3350     final ConcurrentHashMapV8<K,V> map; // To export MapEntry
3351     long est; // size estimate
3352     EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3353     long est, ConcurrentHashMapV8<K,V> map) {
3354     super(tab, size, index, limit);
3355     this.map = map;
3356     this.est = est;
3357     }
3358    
3359     public ConcurrentHashMapSpliterator<Map.Entry<K,V>> trySplit() {
3360     int i, f, h;
3361     return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3362     new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
3363     f, est >>>= 1, map);
3364     }
3365    
3366     public void forEachRemaining(Action<? super Map.Entry<K,V>> action) {
3367     if (action == null) throw new NullPointerException();
3368     for (Node<K,V> p; (p = advance()) != null; )
3369     action.apply(new MapEntry<K,V>(p.key, p.val, map));
3370     }
3371    
3372     public boolean tryAdvance(Action<? super Map.Entry<K,V>> action) {
3373     if (action == null) throw new NullPointerException();
3374     Node<K,V> p;
3375     if ((p = advance()) == null)
3376     return false;
3377     action.apply(new MapEntry<K,V>(p.key, p.val, map));
3378     return true;
3379     }
3380    
3381     public long estimateSize() { return est; }
3382    
3383 dl 1.75 }
3384    
3385 dl 1.102 // Parallel bulk operations
3386    
3387 dl 1.75 /**
3388 dl 1.102 * Computes initial batch value for bulk tasks. The returned value
3389     * is approximately exp2 of the number of times (minus one) to
3390     * split task by two before executing leaf action. This value is
3391     * faster to compute and more convenient to use as a guide to
3392     * splitting than is the depth, since it is used while dividing by
3393     * two anyway.
3394     */
3395     final int batchFor(long b) {
3396     long n;
3397     if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3398     return 0;
3399     int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3400     return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3401 dl 1.75 }
3402    
3403     /**
3404 dl 1.84 * Performs the given action for each (key, value).
3405     *
3406 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3407     * needed for this operation to be executed in parallel
3408 dl 1.84 * @param action the action
3409 dl 1.102 * @since 1.8
3410 dl 1.75 */
3411 dl 1.102 public void forEach(long parallelismThreshold,
3412     BiAction<? super K,? super V> action) {
3413     if (action == null) throw new NullPointerException();
3414     new ForEachMappingTask<K,V>
3415     (null, batchFor(parallelismThreshold), 0, 0, table,
3416     action).invoke();
3417 dl 1.84 }
3418 dl 1.75
3419 dl 1.84 /**
3420     * Performs the given action for each non-null transformation
3421     * of each (key, value).
3422     *
3423 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3424     * needed for this operation to be executed in parallel
3425 dl 1.84 * @param transformer a function returning the transformation
3426 jsr166 1.91 * for an element, or null if there is no transformation (in
3427 jsr166 1.93 * which case the action is not applied)
3428 dl 1.84 * @param action the action
3429 dl 1.102 * @since 1.8
3430 dl 1.84 */
3431 dl 1.102 public <U> void forEach(long parallelismThreshold,
3432     BiFun<? super K, ? super V, ? extends U> transformer,
3433     Action<? super U> action) {
3434     if (transformer == null || action == null)
3435     throw new NullPointerException();
3436     new ForEachTransformedMappingTask<K,V,U>
3437     (null, batchFor(parallelismThreshold), 0, 0, table,
3438     transformer, action).invoke();
3439 dl 1.84 }
3440 dl 1.75
3441 dl 1.84 /**
3442     * Returns a non-null result from applying the given search
3443     * function on each (key, value), or null if none. Upon
3444     * success, further element processing is suppressed and the
3445     * results of any other parallel invocations of the search
3446     * function are ignored.
3447     *
3448 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3449     * needed for this operation to be executed in parallel
3450 dl 1.84 * @param searchFunction a function returning a non-null
3451     * result on success, else null
3452     * @return a non-null result from applying the given search
3453     * function on each (key, value), or null if none
3454 dl 1.102 * @since 1.8
3455 dl 1.84 */
3456 dl 1.102 public <U> U search(long parallelismThreshold,
3457     BiFun<? super K, ? super V, ? extends U> searchFunction) {
3458     if (searchFunction == null) throw new NullPointerException();
3459     return new SearchMappingsTask<K,V,U>
3460     (null, batchFor(parallelismThreshold), 0, 0, table,
3461     searchFunction, new AtomicReference<U>()).invoke();
3462 dl 1.84 }
3463 dl 1.75
3464 dl 1.84 /**
3465     * Returns the result of accumulating the given transformation
3466     * of all (key, value) pairs using the given reducer to
3467     * combine values, or null if none.
3468     *
3469 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3470     * needed for this operation to be executed in parallel
3471 dl 1.84 * @param transformer a function returning the transformation
3472 jsr166 1.91 * for an element, or null if there is no transformation (in
3473 jsr166 1.93 * which case it is not combined)
3474 dl 1.84 * @param reducer a commutative associative combining function
3475     * @return the result of accumulating the given transformation
3476     * of all (key, value) pairs
3477 dl 1.102 * @since 1.8
3478 dl 1.84 */
3479 dl 1.102 public <U> U reduce(long parallelismThreshold,
3480     BiFun<? super K, ? super V, ? extends U> transformer,
3481     BiFun<? super U, ? super U, ? extends U> reducer) {
3482     if (transformer == null || reducer == null)
3483     throw new NullPointerException();
3484     return new MapReduceMappingsTask<K,V,U>
3485     (null, batchFor(parallelismThreshold), 0, 0, table,
3486     null, transformer, reducer).invoke();
3487 dl 1.84 }
3488 dl 1.75
3489 dl 1.84 /**
3490     * Returns the result of accumulating the given transformation
3491     * of all (key, value) pairs using the given reducer to
3492     * combine values, and the given basis as an identity value.
3493     *
3494 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3495     * needed for this operation to be executed in parallel
3496 dl 1.84 * @param transformer a function returning the transformation
3497     * for an element
3498     * @param basis the identity (initial default value) for the reduction
3499     * @param reducer a commutative associative combining function
3500     * @return the result of accumulating the given transformation
3501     * of all (key, value) pairs
3502 dl 1.102 * @since 1.8
3503 dl 1.84 */
3504 dl 1.107 public double reduceToDouble(long parallelismThreshold,
3505     ObjectByObjectToDouble<? super K, ? super V> transformer,
3506     double basis,
3507     DoubleByDoubleToDouble reducer) {
3508 dl 1.102 if (transformer == null || reducer == null)
3509     throw new NullPointerException();
3510     return new MapReduceMappingsToDoubleTask<K,V>
3511     (null, batchFor(parallelismThreshold), 0, 0, table,
3512     null, transformer, basis, reducer).invoke();
3513 dl 1.84 }
3514    
3515     /**
3516     * Returns the result of accumulating the given transformation
3517     * of all (key, value) pairs using the given reducer to
3518     * combine values, and the given basis as an identity value.
3519     *
3520 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3521     * needed for this operation to be executed in parallel
3522 dl 1.84 * @param transformer a function returning the transformation
3523     * for an element
3524     * @param basis the identity (initial default value) for the reduction
3525     * @param reducer a commutative associative combining function
3526     * @return the result of accumulating the given transformation
3527     * of all (key, value) pairs
3528 dl 1.102 * @since 1.8
3529 dl 1.84 */
3530 dl 1.102 public long reduceToLong(long parallelismThreshold,
3531     ObjectByObjectToLong<? super K, ? super V> transformer,
3532     long basis,
3533     LongByLongToLong reducer) {
3534     if (transformer == null || reducer == null)
3535     throw new NullPointerException();
3536     return new MapReduceMappingsToLongTask<K,V>
3537     (null, batchFor(parallelismThreshold), 0, 0, table,
3538     null, transformer, basis, reducer).invoke();
3539 dl 1.84 }
3540    
3541     /**
3542     * Returns the result of accumulating the given transformation
3543     * of all (key, value) pairs using the given reducer to
3544     * combine values, and the given basis as an identity value.
3545     *
3546 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3547     * needed for this operation to be executed in parallel
3548 dl 1.84 * @param transformer a function returning the transformation
3549     * for an element
3550     * @param basis the identity (initial default value) for the reduction
3551     * @param reducer a commutative associative combining function
3552     * @return the result of accumulating the given transformation
3553     * of all (key, value) pairs
3554 dl 1.102 * @since 1.8
3555 dl 1.84 */
3556 dl 1.102 public int reduceToInt(long parallelismThreshold,
3557     ObjectByObjectToInt<? super K, ? super V> transformer,
3558     int basis,
3559     IntByIntToInt reducer) {
3560     if (transformer == null || reducer == null)
3561     throw new NullPointerException();
3562     return new MapReduceMappingsToIntTask<K,V>
3563     (null, batchFor(parallelismThreshold), 0, 0, table,
3564     null, transformer, basis, reducer).invoke();
3565 dl 1.84 }
3566    
3567     /**
3568     * Performs the given action for each key.
3569     *
3570 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3571     * needed for this operation to be executed in parallel
3572 dl 1.84 * @param action the action
3573 dl 1.102 * @since 1.8
3574 dl 1.84 */
3575 dl 1.102 public void forEachKey(long parallelismThreshold,
3576     Action<? super K> action) {
3577     if (action == null) throw new NullPointerException();
3578     new ForEachKeyTask<K,V>
3579     (null, batchFor(parallelismThreshold), 0, 0, table,
3580     action).invoke();
3581 dl 1.84 }
3582    
3583     /**
3584     * Performs the given action for each non-null transformation
3585     * of each key.
3586     *
3587 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3588     * needed for this operation to be executed in parallel
3589 dl 1.84 * @param transformer a function returning the transformation
3590 jsr166 1.91 * for an element, or null if there is no transformation (in
3591 jsr166 1.93 * which case the action is not applied)
3592 dl 1.84 * @param action the action
3593 dl 1.102 * @since 1.8
3594 dl 1.84 */
3595 dl 1.102 public <U> void forEachKey(long parallelismThreshold,
3596     Fun<? super K, ? extends U> transformer,
3597     Action<? super U> action) {
3598     if (transformer == null || action == null)
3599     throw new NullPointerException();
3600     new ForEachTransformedKeyTask<K,V,U>
3601     (null, batchFor(parallelismThreshold), 0, 0, table,
3602     transformer, action).invoke();
3603 dl 1.84 }
3604    
3605     /**
3606     * Returns a non-null result from applying the given search
3607     * function on each key, or null if none. Upon success,
3608     * further element processing is suppressed and the results of
3609     * any other parallel invocations of the search function are
3610     * ignored.
3611     *
3612 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3613     * needed for this operation to be executed in parallel
3614 dl 1.84 * @param searchFunction a function returning a non-null
3615     * result on success, else null
3616     * @return a non-null result from applying the given search
3617     * function on each key, or null if none
3618 dl 1.102 * @since 1.8
3619 dl 1.84 */
3620 dl 1.102 public <U> U searchKeys(long parallelismThreshold,
3621     Fun<? super K, ? extends U> searchFunction) {
3622     if (searchFunction == null) throw new NullPointerException();
3623     return new SearchKeysTask<K,V,U>
3624     (null, batchFor(parallelismThreshold), 0, 0, table,
3625     searchFunction, new AtomicReference<U>()).invoke();
3626 dl 1.84 }
3627    
3628     /**
3629     * Returns the result of accumulating all keys using the given
3630     * reducer to combine values, or null if none.
3631     *
3632 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3633     * needed for this operation to be executed in parallel
3634 dl 1.84 * @param reducer a commutative associative combining function
3635     * @return the result of accumulating all keys using the given
3636     * reducer to combine values, or null if none
3637 dl 1.102 * @since 1.8
3638 dl 1.84 */
3639 dl 1.102 public K reduceKeys(long parallelismThreshold,
3640     BiFun<? super K, ? super K, ? extends K> reducer) {
3641     if (reducer == null) throw new NullPointerException();
3642     return new ReduceKeysTask<K,V>
3643     (null, batchFor(parallelismThreshold), 0, 0, table,
3644     null, reducer).invoke();
3645 dl 1.84 }
3646    
3647     /**
3648     * Returns the result of accumulating the given transformation
3649     * of all keys using the given reducer to combine values, or
3650     * null if none.
3651     *
3652 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3653     * needed for this operation to be executed in parallel
3654 dl 1.84 * @param transformer a function returning the transformation
3655 jsr166 1.91 * for an element, or null if there is no transformation (in
3656 jsr166 1.93 * which case it is not combined)
3657 dl 1.84 * @param reducer a commutative associative combining function
3658     * @return the result of accumulating the given transformation
3659     * of all keys
3660 dl 1.102 * @since 1.8
3661 dl 1.84 */
3662 dl 1.102 public <U> U reduceKeys(long parallelismThreshold,
3663     Fun<? super K, ? extends U> transformer,
3664 dl 1.84 BiFun<? super U, ? super U, ? extends U> reducer) {
3665 dl 1.102 if (transformer == null || reducer == null)
3666     throw new NullPointerException();
3667     return new MapReduceKeysTask<K,V,U>
3668     (null, batchFor(parallelismThreshold), 0, 0, table,
3669     null, transformer, reducer).invoke();
3670 dl 1.84 }
3671    
3672     /**
3673     * Returns the result of accumulating the given transformation
3674     * of all keys using the given reducer to combine values, and
3675     * the given basis as an identity value.
3676     *
3677 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3678     * needed for this operation to be executed in parallel
3679 dl 1.84 * @param transformer a function returning the transformation
3680     * for an element
3681     * @param basis the identity (initial default value) for the reduction
3682     * @param reducer a commutative associative combining function
3683 dl 1.102 * @return the result of accumulating the given transformation
3684 dl 1.84 * of all keys
3685 dl 1.102 * @since 1.8
3686 dl 1.84 */
3687 dl 1.102 public double reduceKeysToDouble(long parallelismThreshold,
3688     ObjectToDouble<? super K> transformer,
3689     double basis,
3690     DoubleByDoubleToDouble reducer) {
3691     if (transformer == null || reducer == null)
3692     throw new NullPointerException();
3693     return new MapReduceKeysToDoubleTask<K,V>
3694     (null, batchFor(parallelismThreshold), 0, 0, table,
3695     null, transformer, basis, reducer).invoke();
3696 dl 1.84 }
3697    
3698     /**
3699     * Returns the result of accumulating the given transformation
3700     * of all keys using the given reducer to combine values, and
3701     * the given basis as an identity value.
3702     *
3703 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3704     * needed for this operation to be executed in parallel
3705 dl 1.84 * @param transformer a function returning the transformation
3706     * for an element
3707     * @param basis the identity (initial default value) for the reduction
3708     * @param reducer a commutative associative combining function
3709     * @return the result of accumulating the given transformation
3710     * of all keys
3711 dl 1.102 * @since 1.8
3712 dl 1.84 */
3713 dl 1.102 public long reduceKeysToLong(long parallelismThreshold,
3714     ObjectToLong<? super K> transformer,
3715     long basis,
3716     LongByLongToLong reducer) {
3717     if (transformer == null || reducer == null)
3718     throw new NullPointerException();
3719     return new MapReduceKeysToLongTask<K,V>
3720     (null, batchFor(parallelismThreshold), 0, 0, table,
3721     null, transformer, basis, reducer).invoke();
3722 dl 1.84 }
3723    
3724     /**
3725     * Returns the result of accumulating the given transformation
3726     * of all keys using the given reducer to combine values, and
3727     * the given basis as an identity value.
3728     *
3729 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3730     * needed for this operation to be executed in parallel
3731 dl 1.84 * @param transformer a function returning the transformation
3732     * for an element
3733     * @param basis the identity (initial default value) for the reduction
3734     * @param reducer a commutative associative combining function
3735     * @return the result of accumulating the given transformation
3736     * of all keys
3737 dl 1.102 * @since 1.8
3738 dl 1.84 */
3739 dl 1.102 public int reduceKeysToInt(long parallelismThreshold,
3740     ObjectToInt<? super K> transformer,
3741     int basis,
3742     IntByIntToInt reducer) {
3743     if (transformer == null || reducer == null)
3744     throw new NullPointerException();
3745     return new MapReduceKeysToIntTask<K,V>
3746     (null, batchFor(parallelismThreshold), 0, 0, table,
3747     null, transformer, basis, reducer).invoke();
3748 dl 1.84 }
3749    
3750     /**
3751     * Performs the given action for each value.
3752     *
3753 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3754     * needed for this operation to be executed in parallel
3755 dl 1.84 * @param action the action
3756 dl 1.102 * @since 1.8
3757 dl 1.84 */
3758 dl 1.102 public void forEachValue(long parallelismThreshold,
3759     Action<? super V> action) {
3760     if (action == null)
3761     throw new NullPointerException();
3762     new ForEachValueTask<K,V>
3763     (null, batchFor(parallelismThreshold), 0, 0, table,
3764     action).invoke();
3765 dl 1.84 }
3766    
3767     /**
3768     * Performs the given action for each non-null transformation
3769     * of each value.
3770     *
3771 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3772     * needed for this operation to be executed in parallel
3773 dl 1.84 * @param transformer a function returning the transformation
3774 jsr166 1.91 * for an element, or null if there is no transformation (in
3775 jsr166 1.93 * which case the action is not applied)
3776 dl 1.102 * @param action the action
3777     * @since 1.8
3778 dl 1.84 */
3779 dl 1.102 public <U> void forEachValue(long parallelismThreshold,
3780     Fun<? super V, ? extends U> transformer,
3781     Action<? super U> action) {
3782     if (transformer == null || action == null)
3783     throw new NullPointerException();
3784     new ForEachTransformedValueTask<K,V,U>
3785     (null, batchFor(parallelismThreshold), 0, 0, table,
3786     transformer, action).invoke();
3787 dl 1.84 }
3788    
3789     /**
3790     * Returns a non-null result from applying the given search
3791     * function on each value, or null if none. Upon success,
3792     * further element processing is suppressed and the results of
3793     * any other parallel invocations of the search function are
3794     * ignored.
3795     *
3796 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3797     * needed for this operation to be executed in parallel
3798 dl 1.84 * @param searchFunction a function returning a non-null
3799     * result on success, else null
3800     * @return a non-null result from applying the given search
3801     * function on each value, or null if none
3802 dl 1.102 * @since 1.8
3803 dl 1.84 */
3804 dl 1.102 public <U> U searchValues(long parallelismThreshold,
3805     Fun<? super V, ? extends U> searchFunction) {
3806     if (searchFunction == null) throw new NullPointerException();
3807     return new SearchValuesTask<K,V,U>
3808     (null, batchFor(parallelismThreshold), 0, 0, table,
3809     searchFunction, new AtomicReference<U>()).invoke();
3810 dl 1.84 }
3811    
3812     /**
3813     * Returns the result of accumulating all values using the
3814     * given reducer to combine values, or null if none.
3815     *
3816 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3817     * needed for this operation to be executed in parallel
3818 dl 1.84 * @param reducer a commutative associative combining function
3819 dl 1.102 * @return the result of accumulating all values
3820     * @since 1.8
3821 dl 1.84 */
3822 dl 1.102 public V reduceValues(long parallelismThreshold,
3823     BiFun<? super V, ? super V, ? extends V> reducer) {
3824     if (reducer == null) throw new NullPointerException();
3825     return new ReduceValuesTask<K,V>
3826     (null, batchFor(parallelismThreshold), 0, 0, table,
3827     null, reducer).invoke();
3828 dl 1.84 }
3829    
3830     /**
3831     * Returns the result of accumulating the given transformation
3832     * of all values using the given reducer to combine values, or
3833     * null if none.
3834     *
3835 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3836     * needed for this operation to be executed in parallel
3837 dl 1.84 * @param transformer a function returning the transformation
3838 jsr166 1.91 * for an element, or null if there is no transformation (in
3839 jsr166 1.93 * which case it is not combined)
3840 dl 1.84 * @param reducer a commutative associative combining function
3841     * @return the result of accumulating the given transformation
3842     * of all values
3843 dl 1.102 * @since 1.8
3844 dl 1.84 */
3845 dl 1.102 public <U> U reduceValues(long parallelismThreshold,
3846     Fun<? super V, ? extends U> transformer,
3847     BiFun<? super U, ? super U, ? extends U> reducer) {
3848     if (transformer == null || reducer == null)
3849     throw new NullPointerException();
3850     return new MapReduceValuesTask<K,V,U>
3851     (null, batchFor(parallelismThreshold), 0, 0, table,
3852     null, transformer, reducer).invoke();
3853 dl 1.84 }
3854    
3855     /**
3856     * Returns the result of accumulating the given transformation
3857     * of all values using the given reducer to combine values,
3858     * and the given basis as an identity value.
3859     *
3860 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3861     * needed for this operation to be executed in parallel
3862 dl 1.84 * @param transformer a function returning the transformation
3863     * for an element
3864     * @param basis the identity (initial default value) for the reduction
3865     * @param reducer a commutative associative combining function
3866     * @return the result of accumulating the given transformation
3867     * of all values
3868 dl 1.102 * @since 1.8
3869 dl 1.84 */
3870 dl 1.102 public double reduceValuesToDouble(long parallelismThreshold,
3871     ObjectToDouble<? super V> transformer,
3872     double basis,
3873     DoubleByDoubleToDouble reducer) {
3874     if (transformer == null || reducer == null)
3875     throw new NullPointerException();
3876     return new MapReduceValuesToDoubleTask<K,V>
3877     (null, batchFor(parallelismThreshold), 0, 0, table,
3878     null, transformer, basis, reducer).invoke();
3879 dl 1.84 }
3880    
3881     /**
3882     * Returns the result of accumulating the given transformation
3883     * of all values using the given reducer to combine values,
3884     * and the given basis as an identity value.
3885     *
3886 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3887     * needed for this operation to be executed in parallel
3888 dl 1.84 * @param transformer a function returning the transformation
3889     * for an element
3890     * @param basis the identity (initial default value) for the reduction
3891     * @param reducer a commutative associative combining function
3892     * @return the result of accumulating the given transformation
3893     * of all values
3894 dl 1.102 * @since 1.8
3895 dl 1.84 */
3896 dl 1.102 public long reduceValuesToLong(long parallelismThreshold,
3897     ObjectToLong<? super V> transformer,
3898     long basis,
3899     LongByLongToLong reducer) {
3900     if (transformer == null || reducer == null)
3901     throw new NullPointerException();
3902     return new MapReduceValuesToLongTask<K,V>
3903     (null, batchFor(parallelismThreshold), 0, 0, table,
3904     null, transformer, basis, reducer).invoke();
3905 dl 1.84 }
3906    
3907     /**
3908     * Returns the result of accumulating the given transformation
3909     * of all values using the given reducer to combine values,
3910     * and the given basis as an identity value.
3911     *
3912 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3913     * needed for this operation to be executed in parallel
3914 dl 1.84 * @param transformer a function returning the transformation
3915     * for an element
3916     * @param basis the identity (initial default value) for the reduction
3917     * @param reducer a commutative associative combining function
3918     * @return the result of accumulating the given transformation
3919     * of all values
3920 dl 1.102 * @since 1.8
3921 dl 1.84 */
3922 dl 1.102 public int reduceValuesToInt(long parallelismThreshold,
3923     ObjectToInt<? super V> transformer,
3924     int basis,
3925     IntByIntToInt reducer) {
3926     if (transformer == null || reducer == null)
3927     throw new NullPointerException();
3928     return new MapReduceValuesToIntTask<K,V>
3929     (null, batchFor(parallelismThreshold), 0, 0, table,
3930     null, transformer, basis, reducer).invoke();
3931 dl 1.84 }
3932    
3933     /**
3934     * Performs the given action for each entry.
3935     *
3936 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3937     * needed for this operation to be executed in parallel
3938 dl 1.84 * @param action the action
3939 dl 1.102 * @since 1.8
3940 dl 1.84 */
3941 dl 1.102 public void forEachEntry(long parallelismThreshold,
3942     Action<? super Map.Entry<K,V>> action) {
3943     if (action == null) throw new NullPointerException();
3944     new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
3945     action).invoke();
3946 dl 1.84 }
3947    
3948     /**
3949     * Performs the given action for each non-null transformation
3950     * of each entry.
3951     *
3952 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3953     * needed for this operation to be executed in parallel
3954 dl 1.84 * @param transformer a function returning the transformation
3955 jsr166 1.91 * for an element, or null if there is no transformation (in
3956 jsr166 1.93 * which case the action is not applied)
3957 dl 1.84 * @param action the action
3958 dl 1.102 * @since 1.8
3959 dl 1.84 */
3960 dl 1.102 public <U> void forEachEntry(long parallelismThreshold,
3961     Fun<Map.Entry<K,V>, ? extends U> transformer,
3962     Action<? super U> action) {
3963     if (transformer == null || action == null)
3964     throw new NullPointerException();
3965     new ForEachTransformedEntryTask<K,V,U>
3966     (null, batchFor(parallelismThreshold), 0, 0, table,
3967     transformer, action).invoke();
3968 dl 1.84 }
3969    
3970     /**
3971     * Returns a non-null result from applying the given search
3972     * function on each entry, or null if none. Upon success,
3973     * further element processing is suppressed and the results of
3974     * any other parallel invocations of the search function are
3975     * ignored.
3976     *
3977 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3978     * needed for this operation to be executed in parallel
3979 dl 1.84 * @param searchFunction a function returning a non-null
3980     * result on success, else null
3981     * @return a non-null result from applying the given search
3982     * function on each entry, or null if none
3983 dl 1.102 * @since 1.8
3984 dl 1.84 */
3985 dl 1.102 public <U> U searchEntries(long parallelismThreshold,
3986     Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
3987     if (searchFunction == null) throw new NullPointerException();
3988     return new SearchEntriesTask<K,V,U>
3989     (null, batchFor(parallelismThreshold), 0, 0, table,
3990     searchFunction, new AtomicReference<U>()).invoke();
3991 dl 1.84 }
3992    
3993     /**
3994     * Returns the result of accumulating all entries using the
3995     * given reducer to combine values, or null if none.
3996     *
3997 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3998     * needed for this operation to be executed in parallel
3999 dl 1.84 * @param reducer a commutative associative combining function
4000     * @return the result of accumulating all entries
4001 dl 1.102 * @since 1.8
4002 dl 1.84 */
4003 dl 1.102 public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4004     BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4005     if (reducer == null) throw new NullPointerException();
4006     return new ReduceEntriesTask<K,V>
4007     (null, batchFor(parallelismThreshold), 0, 0, table,
4008     null, reducer).invoke();
4009 dl 1.84 }
4010    
4011     /**
4012     * Returns the result of accumulating the given transformation
4013     * of all entries using the given reducer to combine values,
4014     * or null if none.
4015     *
4016 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
4017     * needed for this operation to be executed in parallel
4018 dl 1.84 * @param transformer a function returning the transformation
4019 jsr166 1.91 * for an element, or null if there is no transformation (in
4020 jsr166 1.93 * which case it is not combined)
4021 dl 1.84 * @param reducer a commutative associative combining function
4022     * @return the result of accumulating the given transformation
4023     * of all entries
4024 dl 1.102 * @since 1.8
4025 dl 1.84 */
4026 dl 1.102 public <U> U reduceEntries(long parallelismThreshold,
4027     Fun<Map.Entry<K,V>, ? extends U> transformer,
4028     BiFun<? super U, ? super U, ? extends U> reducer) {
4029     if (transformer == null || reducer == null)
4030     throw new NullPointerException();
4031     return new MapReduceEntriesTask<K,V,U>
4032     (null, batchFor(parallelismThreshold), 0, 0, table,
4033     null, transformer, reducer).invoke();
4034 dl 1.84 }
4035    
4036     /**
4037     * Returns the result of accumulating the given transformation
4038     * of all entries using the given reducer to combine values,
4039     * and the given basis as an identity value.
4040     *
4041 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
4042     * needed for this operation to be executed in parallel
4043 dl 1.84 * @param transformer a function returning the transformation
4044     * for an element
4045     * @param basis the identity (initial default value) for the reduction
4046     * @param reducer a commutative associative combining function
4047     * @return the result of accumulating the given transformation
4048     * of all entries
4049 dl 1.102 * @since 1.8
4050 dl 1.84 */
4051 dl 1.102 public double reduceEntriesToDouble(long parallelismThreshold,
4052     ObjectToDouble<Map.Entry<K,V>> transformer,
4053     double basis,
4054     DoubleByDoubleToDouble reducer) {
4055     if (transformer == null || reducer == null)
4056     throw new NullPointerException();
4057     return new MapReduceEntriesToDoubleTask<K,V>
4058     (null, batchFor(parallelismThreshold), 0, 0, table,
4059     null, transformer, basis, reducer).invoke();
4060 dl 1.84 }
4061    
4062     /**
4063     * Returns the result of accumulating the given transformation
4064     * of all entries using the given reducer to combine values,
4065     * and the given basis as an identity value.
4066     *
4067 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
4068     * needed for this operation to be executed in parallel
4069 dl 1.84 * @param transformer a function returning the transformation
4070     * for an element
4071     * @param basis the identity (initial default value) for the reduction
4072     * @param reducer a commutative associative combining function
4073 dl 1.102 * @return the result of accumulating the given transformation
4074 dl 1.84 * of all entries
4075 dl 1.102 * @since 1.8
4076 dl 1.84 */
4077 dl 1.102 public long reduceEntriesToLong(long parallelismThreshold,
4078     ObjectToLong<Map.Entry<K,V>> transformer,
4079     long basis,
4080     LongByLongToLong reducer) {
4081     if (transformer == null || reducer == null)
4082     throw new NullPointerException();
4083     return new MapReduceEntriesToLongTask<K,V>
4084     (null, batchFor(parallelismThreshold), 0, 0, table,
4085     null, transformer, basis, reducer).invoke();
4086 dl 1.84 }
4087    
4088     /**
4089     * Returns the result of accumulating the given transformation
4090     * of all entries using the given reducer to combine values,
4091     * and the given basis as an identity value.
4092     *
4093 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
4094     * needed for this operation to be executed in parallel
4095 dl 1.84 * @param transformer a function returning the transformation
4096     * for an element
4097     * @param basis the identity (initial default value) for the reduction
4098     * @param reducer a commutative associative combining function
4099     * @return the result of accumulating the given transformation
4100     * of all entries
4101 dl 1.102 * @since 1.8
4102 dl 1.84 */
4103 dl 1.102 public int reduceEntriesToInt(long parallelismThreshold,
4104     ObjectToInt<Map.Entry<K,V>> transformer,
4105     int basis,
4106     IntByIntToInt reducer) {
4107     if (transformer == null || reducer == null)
4108     throw new NullPointerException();
4109     return new MapReduceEntriesToIntTask<K,V>
4110     (null, batchFor(parallelismThreshold), 0, 0, table,
4111     null, transformer, basis, reducer).invoke();
4112 dl 1.84 }
4113    
4114    
4115     /* ----------------Views -------------- */
4116    
    /**
     * Base class for views. Wraps the backing map and implements the
     * Collection operations that are common to key, value, and entry
     * views; element-specific operations are left abstract.
     */
    abstract static class CollectionView<K,V,E>
        implements Collection<E>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        final ConcurrentHashMapV8<K,V> map;   // the backing map; never null
        CollectionView(ConcurrentHashMapV8<K,V> map)  { this.map = map; }

        /**
         * Returns the map backing this view.
         *
         * @return the map backing this view
         */
        public ConcurrentHashMapV8<K,V> getMap() { return map; }

        /**
         * Removes all of the elements from this view, by removing all
         * the mappings from the map backing this view.
         */
        public final void clear()      { map.clear(); }
        public final int size()        { return map.size(); }
        public final boolean isEmpty() { return map.isEmpty(); }

        // implementations below rely on concrete classes supplying these
        // abstract methods
        /**
         * Returns a "weakly consistent" iterator that will never
         * throw {@link ConcurrentModificationException}, and
         * guarantees to traverse elements as they existed upon
         * construction of the iterator, and may (but is not
         * guaranteed to) reflect any modifications subsequent to
         * construction.
         */
        public abstract Iterator<E> iterator();
        public abstract boolean contains(Object o);
        public abstract boolean remove(Object o);

        private static final String oomeMsg = "Required array size too large";

        public final Object[] toArray() {
            // Size from the current (possibly racy) element count; the loop
            // below regrows the array if the map grew during traversal.
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(oomeMsg);
            int n = (int)sz;
            Object[] r = new Object[n];
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    // Out of room: grow by ~50% + 1, saturating at the
                    // maximum representable array size.
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = e;
            }
            // Trim trailing slack if fewer elements were seen than sized for.
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        @SuppressWarnings("unchecked")
        public final <T> T[] toArray(T[] a) {
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(oomeMsg);
            int m = (int)sz;
            // Reuse the caller's array when it is big enough, per the
            // Collection.toArray(T[]) contract.
            T[] r = (a.length >= m) ? a :
                (T[])java.lang.reflect.Array
                .newInstance(a.getClass().getComponentType(), m);
            int n = r.length;
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    // Same ~50% growth policy as toArray() above.
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = (T)e;
            }
            if (a == r && i < n) {
                r[i] = null; // null-terminate
                return r;
            }
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        /**
         * Returns a string representation of this collection.
         * The string representation consists of the string representations
         * of the collection's elements in the order they are returned by
         * its iterator, enclosed in square brackets ({@code "[]"}).
         * Adjacent elements are separated by the characters {@code ", "}
         * (comma and space). Elements are converted to strings as by
         * {@link String#valueOf(Object)}.
         *
         * @return a string representation of this collection
         */
        public final String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append('[');
            Iterator<E> it = iterator();
            if (it.hasNext()) {
                for (;;) {
                    Object e = it.next();
                    // Guard against infinite recursion if this collection
                    // somehow contains itself.
                    sb.append(e == this ? "(this Collection)" : e);
                    if (!it.hasNext())
                        break;
                    sb.append(',').append(' ');
                }
            }
            return sb.append(']').toString();
        }

        public final boolean containsAll(Collection<?> c) {
            if (c != this) {
                for (Object e : c) {
                    // Null elements can never be present (null-hostile map),
                    // so fail fast on them.
                    if (e == null || !contains(e))
                        return false;
                }
            }
            return true;
        }

        public final boolean removeAll(Collection<?> c) {
            boolean modified = false;
            // Relies on the weakly consistent iterator's remove() to
            // delete from the backing map.
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

        public final boolean retainAll(Collection<?> c) {
            boolean modified = false;
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (!c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

    }
4269    
    /**
     * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in
     * which additions may optionally be enabled by mapping to a
     * common value. This class cannot be directly instantiated.
     * See {@link #keySet() keySet()},
     * {@link #keySet(Object) keySet(V)},
     * {@link #newKeySet() newKeySet()},
     * {@link #newKeySet(int) newKeySet(int)}.
     *
     * @since 1.8
     */
    public static class KeySetView<K,V> extends CollectionView<K,V,K>
        implements Set<K>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        // Value used for additions via add/addAll; null disables additions.
        private final V value;
        KeySetView(ConcurrentHashMapV8<K,V> map, V value) {  // non-public
            super(map);
            this.value = value;
        }

        /**
         * Returns the default mapped value for additions,
         * or {@code null} if additions are not supported.
         *
         * @return the default mapped value for additions, or {@code null}
         * if not supported
         */
        public V getMappedValue() { return value; }

        /**
         * {@inheritDoc}
         * @throws NullPointerException if the specified key is null
         */
        public boolean contains(Object o) { return map.containsKey(o); }

        /**
         * Removes the key from this map view, by removing the key (and its
         * corresponding value) from the backing map. This method does
         * nothing if the key is not in the map.
         *
         * @param o the key to be removed from the backing map
         * @return {@code true} if the backing map contained the specified key
         * @throws NullPointerException if the specified key is null
         */
        public boolean remove(Object o) { return map.remove(o) != null; }

        /**
         * @return an iterator over the keys of the backing map
         */
        public Iterator<K> iterator() {
            Node<K,V>[] t;
            ConcurrentHashMapV8<K,V> m = map;
            // Snapshot the table reference; a null table means empty map.
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeyIterator<K,V>(t, f, 0, f, m);
        }

        /**
         * Adds the specified key to this set view by mapping the key to
         * the default mapped value in the backing map, if defined.
         *
         * @param e key to be added
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the specified key is null
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean add(K e) {
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            // A null return from putVal means no prior mapping existed,
            // i.e. this set changed.
            return map.putVal(e, v, true) == null;
        }

        /**
         * Adds all of the elements in the specified collection to this set,
         * as if by calling {@link #add} on each one.
         *
         * @param c the elements to be inserted into this set
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the collection or any of its
         * elements are {@code null}
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean addAll(Collection<? extends K> c) {
            boolean added = false;
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            for (K e : c) {
                if (map.putVal(e, v, true) == null)
                    added = true;
            }
            return added;
        }

        public int hashCode() {
            // Sum of element hash codes, per the Set.hashCode() contract.
            int h = 0;
            for (K e : this)
                h += e.hashCode();
            return h;
        }

        public boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public ConcurrentHashMapSpliterator<K> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMapV8<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            // Size estimate is clamped to non-negative (sumCount is racy).
            return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Action<? super K> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.apply(p.key);
            }
        }
    }
4398    
    /**
     * A view of a ConcurrentHashMapV8 as a {@link Collection} of
     * values, in which additions are disabled. This class cannot be
     * directly instantiated. See {@link #values()}.
     */
    static final class ValuesView<K,V> extends CollectionView<K,V,V>
        implements Collection<V>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        ValuesView(ConcurrentHashMapV8<K,V> map) { super(map); }
        public final boolean contains(Object o) {
            return map.containsValue(o);
        }

        public final boolean remove(Object o) {
            // Removes (at most) the first mapping encountered whose value
            // equals o; null is never a value, so it is rejected up front.
            if (o != null) {
                for (Iterator<V> it = iterator(); it.hasNext();) {
                    if (o.equals(it.next())) {
                        it.remove();
                        return true;
                    }
                }
            }
            return false;
        }

        public final Iterator<V> iterator() {
            ConcurrentHashMapV8<K,V> m = map;
            Node<K,V>[] t;
            // Snapshot the table reference; a null table means empty map.
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueIterator<K,V>(t, f, 0, f, m);
        }

        // Additions are unsupported: there is no key to associate with.
        public final boolean add(V e) {
            throw new UnsupportedOperationException();
        }
        public final boolean addAll(Collection<? extends V> c) {
            throw new UnsupportedOperationException();
        }

        public ConcurrentHashMapSpliterator<V> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMapV8<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            // Size estimate is clamped to non-negative (sumCount is racy).
            return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Action<? super V> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.apply(p.val);
            }
        }
    }
4456 dl 1.52
    /**
     * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value)
     * entries. This class cannot be directly instantiated. See
     * {@link #entrySet()}.
     */
    static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
        implements Set<Map.Entry<K,V>>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        EntrySetView(ConcurrentHashMapV8<K,V> map) { super(map); }

        public boolean contains(Object o) {
            // True only if o is an entry whose key maps to an equal value.
            Object k, v, r; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (r = map.get(k)) != null &&
                    (v = e.getValue()) != null &&
                    (v == r || v.equals(r)));
        }

        public boolean remove(Object o) {
            // Conditional removal: the mapping is removed only if the key
            // is still associated with the given value.
            Object k, v; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    map.remove(k, v));
        }

        /**
         * @return an iterator over the entries of the backing map
         */
        public Iterator<Map.Entry<K,V>> iterator() {
            ConcurrentHashMapV8<K,V> m = map;
            Node<K,V>[] t;
            // Snapshot the table reference; a null table means empty map.
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntryIterator<K,V>(t, f, 0, f, m);
        }

        public boolean add(Entry<K,V> e) {
            // A null return from putVal means no mapping previously
            // existed, i.e. this set changed.
            return map.putVal(e.getKey(), e.getValue(), false) == null;
        }

        public boolean addAll(Collection<? extends Entry<K,V>> c) {
            boolean added = false;
            for (Entry<K,V> e : c) {
                if (add(e))
                    added = true;
            }
            return added;
        }

        public final int hashCode() {
            // Sum of entry hash codes, per the Set.hashCode() contract.
            int h = 0;
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; ) {
                    h += p.hashCode();
                }
            }
            return h;
        }

        public final boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public ConcurrentHashMapSpliterator<Map.Entry<K,V>> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMapV8<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            // Size estimate is clamped to non-negative (sumCount is racy).
            return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
        }

        public void forEach(Action<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    // Wrap in MapEntry so setValue can write through to the map.
                    action.apply(new MapEntry<K,V>(p.key, p.val, map));
            }
        }

    }
4545 dl 1.52
4546 dl 1.102 // -------------------------------------------------------
4547 dl 1.52
    /**
     * Base class for bulk tasks. Repeats some fields and code from
     * class Traverser, because we need to subclass CountedCompleter.
     */
    abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
        Node<K,V>[] tab;        // same as Traverser
        Node<K,V> next;         // next node within current bin's list
        int index;              // current table slot
        int baseIndex;          // current lower bound of assigned slot range
        int baseLimit;          // upper bound (exclusive) of assigned range
        final int baseSize;     // initial table length; also the slot stride
        int batch;              // split control

        BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
            super(par);
            this.batch = b;
            this.index = this.baseIndex = i;
            if ((this.tab = t) == null)
                this.baseSize = this.baseLimit = 0;     // empty map: nothing to do
            else if (par == null)
                this.baseSize = this.baseLimit = t.length;  // root covers whole table
            else {
                this.baseLimit = f;                     // subtask covers [i, f)
                this.baseSize = par.baseSize;
            }
        }

        /**
         * Same as Traverser version.
         * Advances to the next node in this task's assigned range,
         * returning null when the range is exhausted.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;                             // continue within current bin
            for (;;) {
                Node<K,V>[] t; int i, n; K ek;  // must use locals in checks
                if (e != null)
                    return next = e;
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;                 // assigned range exhausted
                if ((e = tabAt(t, index)) != null && e.hash < 0) {
                    // Negative hash marks a special node, not a plain mapping.
                    if (e instanceof ForwardingNode) {
                        // Bin was moved by a concurrent resize; continue in
                        // the forwarded (next) table.
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        // Tree bins keep a linked "first" list for traversal.
                        e = ((TreeBin<K,V>)e).first;
                    else
                        e = null;   // other special node: nothing to visit here
                }
                if ((index += baseSize) >= n)
                    index = ++baseIndex;    // visit upper slots if present
            }
        }
    }
4605    
4606     /*
4607     * Task classes. Coded in a regular but ugly format/style to
4608     * simplify checks that each variant differs in the right way from
4609 dl 1.82 * others. The null screenings exist because compilers cannot tell
4610     * that we've already null-checked task arguments, so we force
4611     * simplest hoisted bypass to help avoid convoluted traps.
4612 dl 1.52 */
    @SuppressWarnings("serial")
    static final class ForEachKeyTask<K,V>
        extends BulkTask<K,V,Void> {
        final Action<? super K> action;
        ForEachKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Action<? super K> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Action<? super K> action;
            if ((action = this.action) != null) {   // null screening
                // Split the remaining range [i, f) in half while the batch
                // budget lasts: each fork takes [h, f); this task keeps [i, h).
                // Note the in-place updates: batch >>>= 1 and baseLimit = h.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachKeyTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process this task's remaining share sequentially.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.apply(p.key);
                propagateCompletion();
            }
        }
    }
4639    
    @SuppressWarnings("serial")
    static final class ForEachValueTask<K,V>
        extends BulkTask<K,V,Void> {
        final Action<? super V> action;
        ForEachValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Action<? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Action<? super V> action;
            if ((action = this.action) != null) {   // null screening
                // Halve the remaining range while the batch budget lasts;
                // each fork takes the upper half, this task keeps the lower.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachValueTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Apply the action to each value in this task's share.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.apply(p.val);
                propagateCompletion();
            }
        }
    }
4666    
    @SuppressWarnings("serial")
    static final class ForEachEntryTask<K,V>
        extends BulkTask<K,V,Void> {
        final Action<? super Entry<K,V>> action;
        ForEachEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Action<? super Entry<K,V>> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Action<? super Entry<K,V>> action;
            if ((action = this.action) != null) {   // null screening
                // Halve the remaining range while the batch budget lasts;
                // each fork takes the upper half, this task keeps the lower.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachEntryTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Nodes themselves serve as the Map.Entry arguments here.
                for (Node<K,V> p; (p = advance()) != null; )
                    action.apply(p);
                propagateCompletion();
            }
        }
    }
4693    
    @SuppressWarnings("serial")
    static final class ForEachMappingTask<K,V>
        extends BulkTask<K,V,Void> {
        final BiAction<? super K, ? super V> action;
        ForEachMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiAction<? super K,? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final BiAction<? super K, ? super V> action;
            if ((action = this.action) != null) {   // null screening
                // Halve the remaining range while the batch budget lasts;
                // each fork takes the upper half, this task keeps the lower.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachMappingTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Apply the action to each (key, value) pair in this share.
                for (Node<K,V> p; (p = advance()) != null; )
                    action.apply(p.key, p.val);
                propagateCompletion();
            }
        }
    }
4720    
    @SuppressWarnings("serial")
    static final class ForEachTransformedKeyTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Fun<? super K, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super K, ? extends U> transformer, Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Fun<? super K, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {   // null screening
                // Halve the remaining range while the batch budget lasts;
                // each fork takes the upper half, this task keeps the lower.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedKeyTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // A null transformation means "skip this element".
                    if ((u = transformer.apply(p.key)) != null)
                        action.apply(u);
                }
                propagateCompletion();
            }
        }
    }
4753    
    @SuppressWarnings("serial")
    static final class ForEachTransformedValueTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Fun<? super V, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super V, ? extends U> transformer, Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Fun<? super V, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {   // null screening
                // Halve the remaining range while the batch budget lasts;
                // each fork takes the upper half, this task keeps the lower.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedValueTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // A null transformation means "skip this element".
                    if ((u = transformer.apply(p.val)) != null)
                        action.apply(u);
                }
                propagateCompletion();
            }
        }
    }
4786    
    @SuppressWarnings("serial")
    static final class ForEachTransformedEntryTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Fun<Map.Entry<K,V>, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<Map.Entry<K,V>, ? extends U> transformer, Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Fun<Map.Entry<K,V>, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {   // null screening
                // Halve the remaining range while the batch budget lasts;
                // each fork takes the upper half, this task keeps the lower.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedEntryTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // A null transformation means "skip this element".
                    if ((u = transformer.apply(p)) != null)
                        action.apply(u);
                }
                propagateCompletion();
            }
        }
    }
4819    
    @SuppressWarnings("serial")
    static final class ForEachTransformedMappingTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final BiFun<? super K, ? super V, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFun<? super K, ? super V, ? extends U> transformer,
             Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final BiFun<? super K, ? super V, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {   // null screening
                // Halve the remaining range while the batch budget lasts;
                // each fork takes the upper half, this task keeps the lower.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedMappingTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // A null transformation means "skip this element".
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        action.apply(u);
                }
                propagateCompletion();
            }
        }
    }
4853    
    @SuppressWarnings("serial")
    static final class SearchKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super K, ? extends U> searchFunction;
        // Shared across all subtasks of one search; first non-null result wins.
        final AtomicReference<U> result;
        SearchKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super K, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Fun<? super K, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {   // null screening
                // Halve the remaining range while the batch budget lasts,
                // bailing out early if another subtask already succeeded.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchKeysTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan this task's share, re-checking result between elements.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();      // exhausted without a hit
                        break;
                    }
                    if ((u = searchFunction.apply(p.key)) != null) {
                        // Only the winning CAS completes the root; losers
                        // simply stop.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
4897    
    /**
     * Task that searches values for the first non-null result of
     * {@code searchFunction}. Same early-termination protocol as
     * {@code SearchKeysTask}, applied to {@code p.val}.
     */
    @SuppressWarnings("serial")
    static final class SearchValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super V, ? extends U> searchFunction; // value -> U; non-null means "found"
        final AtomicReference<U> result;                  // shared across all subtasks
        SearchValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Fun<? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork upper halves while batch budget remains; stop
                // splitting as soon as a result has been published.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchValuesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan until exhausted, a hit occurs, or someone else wins.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.val)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
4941    
    /**
     * Task that searches entries for the first non-null result of
     * {@code searchFunction}. Same early-termination protocol as
     * {@code SearchKeysTask}, applied to the entry node itself.
     */
    @SuppressWarnings("serial")
    static final class SearchEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<Entry<K,V>, ? extends U> searchFunction; // entry -> U; non-null means "found"
        final AtomicReference<U> result;                   // shared across all subtasks
        SearchEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<Entry<K,V>, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Fun<Entry<K,V>, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork upper halves while batch budget remains; stop
                // splitting as soon as a result has been published.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchEntriesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        // return (not break) here; equivalent to falling
                        // out of the method, as in the sibling search tasks.
                        return;
                    }
                }
            }
        }
    }
4985    
    /**
     * Task that searches (key, value) pairs for the first non-null
     * result of {@code searchFunction}. Same early-termination
     * protocol as {@code SearchKeysTask}.
     */
    @SuppressWarnings("serial")
    static final class SearchMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFun<? super K, ? super V, ? extends U> searchFunction; // (k,v) -> U; non-null means "found"
        final AtomicReference<U> result;                               // shared across all subtasks
        SearchMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFun<? super K, ? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final BiFun<? super K, ? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork upper halves while batch budget remains; stop
                // splitting as soon as a result has been published.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchMappingsTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan until exhausted, a hit occurs, or someone else wins.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key, p.val)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5029    
    /**
     * Task that reduces all keys using {@code reducer}. Forked
     * right-hand subtasks are linked through {@code rights}/
     * {@code nextRight} so each completing parent can fold its
     * children's results into its own during completion traversal.
     */
    @SuppressWarnings("serial")
    static final class ReduceKeysTask<K,V>
        extends BulkTask<K,V,K> {
        final BiFun<? super K, ? super K, ? extends K> reducer;
        K result;                              // this subtree's reduction
        ReduceKeysTask<K,V> rights, nextRight; // stack of forked right-hand siblings
        ReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceKeysTask<K,V> nextRight,
             BiFun<? super K, ? super K, ? extends K> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final K getRawResult() { return result; }
        public final void compute() {
            final BiFun<? super K, ? super K, ? extends K> reducer;
            if ((reducer = this.reducer) != null) {
                // Split: fork the upper half while batch budget remains,
                // pushing each new subtask onto the rights chain.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceKeysTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce this task's own slice; null r means "no element yet".
                K r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    K u = p.key;
                    r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
                }
                result = r;
                // Completion pass: each finished task merges the results
                // of its forked right-hand subtasks, unlinking as it goes.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") ReduceKeysTask<K,V>
                        t = (ReduceKeysTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        K tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5076    
    /**
     * Task that reduces all values using {@code reducer}. Uses the
     * same rights/nextRight sibling chain and completion-time merge
     * as {@code ReduceKeysTask}.
     */
    @SuppressWarnings("serial")
    static final class ReduceValuesTask<K,V>
        extends BulkTask<K,V,V> {
        final BiFun<? super V, ? super V, ? extends V> reducer;
        V result;                                // this subtree's reduction
        ReduceValuesTask<K,V> rights, nextRight; // stack of forked right-hand siblings
        ReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceValuesTask<K,V> nextRight,
             BiFun<? super V, ? super V, ? extends V> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final V getRawResult() { return result; }
        public final void compute() {
            final BiFun<? super V, ? super V, ? extends V> reducer;
            if ((reducer = this.reducer) != null) {
                // Fork the upper half while batch budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceValuesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce this task's own slice.
                V r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    V v = p.val;
                    r = (r == null) ? v : reducer.apply(r, v);
                }
                result = r;
                // Merge forked subtasks' results on completion.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") ReduceValuesTask<K,V>
                        t = (ReduceValuesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        V tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5123    
    /**
     * Task that reduces all entries using {@code reducer}. Nodes are
     * passed to the reducer directly as {@code Map.Entry} instances.
     * Same rights/nextRight merge scheme as {@code ReduceKeysTask}.
     */
    @SuppressWarnings("serial")
    static final class ReduceEntriesTask<K,V>
        extends BulkTask<K,V,Map.Entry<K,V>> {
        final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
        Map.Entry<K,V> result;                    // this subtree's reduction
        ReduceEntriesTask<K,V> rights, nextRight; // stack of forked right-hand siblings
        ReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceEntriesTask<K,V> nextRight,
             BiFun<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final Map.Entry<K,V> getRawResult() { return result; }
        public final void compute() {
            final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
            if ((reducer = this.reducer) != null) {
                // Fork the upper half while batch budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceEntriesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce this task's own slice; the node itself is the entry.
                Map.Entry<K,V> r = null;
                for (Node<K,V> p; (p = advance()) != null; )
                    r = (r == null) ? p : reducer.apply(r, p);
                result = r;
                // Merge forked subtasks' results on completion.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") ReduceEntriesTask<K,V>
                        t = (ReduceEntriesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        Map.Entry<K,V> tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5168    
    /**
     * Task that transforms each key with {@code transformer} and
     * reduces the non-null results with {@code reducer}. Null
     * transformed values are skipped. Same rights/nextRight merge
     * scheme as {@code ReduceKeysTask}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super K, ? extends U> transformer;  // key -> U (null to skip)
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;                                 // this subtree's reduction
        MapReduceKeysTask<K,V,U> rights, nextRight; // stack of forked right-hand siblings
        MapReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysTask<K,V,U> nextRight,
             Fun<? super K, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Fun<? super K, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Fork the upper half while batch budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Transform-and-reduce this task's own slice.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Merge forked subtasks' results on completion.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysTask<K,V,U>
                        t = (MapReduceKeysTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5221    
    /**
     * Task that transforms each value with {@code transformer} and
     * reduces the non-null results with {@code reducer}. Null
     * transformed values are skipped.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super V, ? extends U> transformer;  // value -> U (null to skip)
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;                                   // this subtree's reduction
        MapReduceValuesTask<K,V,U> rights, nextRight; // stack of forked right-hand siblings
        MapReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesTask<K,V,U> nextRight,
             Fun<? super V, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Fun<? super V, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Fork the upper half while batch budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Transform-and-reduce this task's own slice.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Merge forked subtasks' results on completion.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesTask<K,V,U>
                        t = (MapReduceValuesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5274    
    /**
     * Task that transforms each entry with {@code transformer} and
     * reduces the non-null results with {@code reducer}. Null
     * transformed values are skipped.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<Map.Entry<K,V>, ? extends U> transformer; // entry -> U (null to skip)
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;                                    // this subtree's reduction
        MapReduceEntriesTask<K,V,U> rights, nextRight; // stack of forked right-hand siblings
        MapReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesTask<K,V,U> nextRight,
             Fun<Map.Entry<K,V>, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Fun<Map.Entry<K,V>, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Fork the upper half while batch budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Transform-and-reduce this task's own slice.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Merge forked subtasks' results on completion.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesTask<K,V,U>
                        t = (MapReduceEntriesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5327    
    /**
     * Task that transforms each (key, value) pair with
     * {@code transformer} and reduces the non-null results with
     * {@code reducer}. Null transformed values are skipped.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFun<? super K, ? super V, ? extends U> transformer; // (k,v) -> U (null to skip)
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;                                     // this subtree's reduction
        MapReduceMappingsTask<K,V,U> rights, nextRight; // stack of forked right-hand siblings
        MapReduceMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsTask<K,V,U> nextRight,
             BiFun<? super K, ? super V, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final BiFun<? super K, ? super V, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Fork the upper half while batch budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Transform-and-reduce this task's own slice.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Merge forked subtasks' results on completion.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsTask<K,V,U>
                        t = (MapReduceMappingsTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5380    
    /**
     * Task that maps each key to a double and reduces with
     * {@code reducer}, starting from identity element {@code basis}.
     * Primitive accumulation: no null-skipping, every key contributes.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<? super K> transformer; // key -> double
        final DoubleByDoubleToDouble reducer;        // (double,double) -> double
        final double basis;                          // identity/starting value
        double result;                               // this subtree's reduction
        MapReduceKeysToDoubleTask<K,V> rights, nextRight; // stack of forked right-hand siblings
        MapReduceKeysToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToDoubleTask<K,V> nextRight,
             ObjectToDouble<? super K> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectToDouble<? super K> transformer;
            final DoubleByDoubleToDouble reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Fork the upper half while batch budget remains;
                // subtasks start from r, still equal to basis here.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Accumulate this task's own slice.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key));
                result = r;
                // Merge forked subtasks' results on completion.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysToDoubleTask<K,V>
                        t = (MapReduceKeysToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5429    
    /**
     * Task that maps each value to a double and reduces with
     * {@code reducer}, starting from identity element {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<? super V> transformer; // value -> double
        final DoubleByDoubleToDouble reducer;        // (double,double) -> double
        final double basis;                          // identity/starting value
        double result;                               // this subtree's reduction
        MapReduceValuesToDoubleTask<K,V> rights, nextRight; // stack of forked right-hand siblings
        MapReduceValuesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToDoubleTask<K,V> nextRight,
             ObjectToDouble<? super V> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectToDouble<? super V> transformer;
            final DoubleByDoubleToDouble reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Fork the upper half while batch budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Accumulate this task's own slice.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.val));
                result = r;
                // Merge forked subtasks' results on completion.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesToDoubleTask<K,V>
                        t = (MapReduceValuesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5478    
    /**
     * Task that maps each entry to a double and reduces with
     * {@code reducer}, starting from identity element {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<Map.Entry<K,V>> transformer; // entry -> double
        final DoubleByDoubleToDouble reducer;             // (double,double) -> double
        final double basis;                               // identity/starting value
        double result;                                    // this subtree's reduction
        MapReduceEntriesToDoubleTask<K,V> rights, nextRight; // stack of forked right-hand siblings
        MapReduceEntriesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToDoubleTask<K,V> nextRight,
             ObjectToDouble<Map.Entry<K,V>> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectToDouble<Map.Entry<K,V>> transformer;
            final DoubleByDoubleToDouble reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Fork the upper half while batch budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Accumulate this task's own slice.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p));
                result = r;
                // Merge forked subtasks' results on completion.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesToDoubleTask<K,V>
                        t = (MapReduceEntriesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5527    
    /**
     * Task that maps each (key, value) pair to a double and reduces
     * with {@code reducer}, starting from identity element
     * {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectByObjectToDouble<? super K, ? super V> transformer; // (k,v) -> double
        final DoubleByDoubleToDouble reducer;  // (double,double) -> double
        final double basis;                    // identity/starting value
        double result;                         // this subtree's reduction
        MapReduceMappingsToDoubleTask<K,V> rights, nextRight; // stack of forked right-hand siblings
        MapReduceMappingsToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToDoubleTask<K,V> nextRight,
             ObjectByObjectToDouble<? super K, ? super V> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectByObjectToDouble<? super K, ? super V> transformer;
            final DoubleByDoubleToDouble reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Fork the upper half while batch budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Accumulate this task's own slice.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key, p.val));
                result = r;
                // Merge forked subtasks' results on completion.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsToDoubleTask<K,V>
                        t = (MapReduceMappingsToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5576    
    /**
     * Task that maps each key to a long and reduces with
     * {@code reducer}, starting from identity element {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<? super K> transformer; // key -> long
        final LongByLongToLong reducer;            // (long,long) -> long
        final long basis;                          // identity/starting value
        long result;                               // this subtree's reduction
        MapReduceKeysToLongTask<K,V> rights, nextRight; // stack of forked right-hand siblings
        MapReduceKeysToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToLongTask<K,V> nextRight,
             ObjectToLong<? super K> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectToLong<? super K> transformer;
            final LongByLongToLong reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Fork the upper half while batch budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Accumulate this task's own slice.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key));
                result = r;
                // Merge forked subtasks' results on completion.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysToLongTask<K,V>
                        t = (MapReduceKeysToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5625    
    /**
     * Task that maps each value to a long and reduces with
     * {@code reducer}, starting from identity element {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<? super V> transformer; // value -> long
        final LongByLongToLong reducer;            // (long,long) -> long
        final long basis;                          // identity/starting value
        long result;                               // this subtree's reduction
        MapReduceValuesToLongTask<K,V> rights, nextRight; // stack of forked right-hand siblings
        MapReduceValuesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToLongTask<K,V> nextRight,
             ObjectToLong<? super V> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectToLong<? super V> transformer;
            final LongByLongToLong reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Fork the upper half while batch budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Accumulate this task's own slice.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.val));
                result = r;
                // Merge forked subtasks' results on completion.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesToLongTask<K,V>
                        t = (MapReduceValuesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5674    
    /**
     * ForkJoin task that maps each map ENTRY to a {@code long} via
     * {@code transformer} and combines results with {@code reducer},
     * starting from the identity {@code basis}. Does nothing if either
     * function is null.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<Map.Entry<K,V>> transformer; // entry -> long
        final LongByLongToLong reducer;                 // combines two longs
        final long basis;                               // reduction identity
        long result;                                    // accumulated result
        MapReduceEntriesToLongTask<K,V> rights, nextRight; // forked subtask list
        MapReduceEntriesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToLongTask<K,V> nextRight,
             ObjectToLong<Map.Entry<K,V>> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        /** Returns the reduction accumulated so far (final once complete). */
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectToLong<Map.Entry<K,V>> transformer;
            final LongByLongToLong reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Fork right halves of the remaining range while budget remains;
                // batch >>>= 1 and baseLimit = h update this task in place.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Map/reduce over this task's own (left) portion; the node
                // itself serves as the Map.Entry passed to the transformer.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p));
                result = r;
                // Merge subtask results up the completion tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesToLongTask<K,V>
                        t = (MapReduceEntriesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5723    
    /**
     * ForkJoin task that maps each (key, value) pair to a {@code long} via
     * {@code transformer} and combines results with {@code reducer},
     * starting from the identity {@code basis}. Does nothing if either
     * function is null.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectByObjectToLong<? super K, ? super V> transformer; // (k,v) -> long
        final LongByLongToLong reducer;            // combines two longs
        final long basis;                          // reduction identity
        long result;                               // accumulated result
        MapReduceMappingsToLongTask<K,V> rights, nextRight; // forked subtask list
        MapReduceMappingsToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToLongTask<K,V> nextRight,
             ObjectByObjectToLong<? super K, ? super V> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        /** Returns the reduction accumulated so far (final once complete). */
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectByObjectToLong<? super K, ? super V> transformer;
            final LongByLongToLong reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Fork right halves of the remaining range while budget remains;
                // batch >>>= 1 and baseLimit = h update this task in place.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Map/reduce over this task's own (left) portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key, p.val));
                result = r;
                // Merge subtask results up the completion tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsToLongTask<K,V>
                        t = (MapReduceMappingsToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5772    
    /**
     * ForkJoin task that maps each KEY to an {@code int} via
     * {@code transformer} and combines results with {@code reducer},
     * starting from the identity {@code basis}. Does nothing if either
     * function is null.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<? super K> transformer;  // key -> int
        final IntByIntToInt reducer;               // combines two ints
        final int basis;                           // reduction identity
        int result;                                // accumulated result
        MapReduceKeysToIntTask<K,V> rights, nextRight; // forked subtask list
        MapReduceKeysToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToIntTask<K,V> nextRight,
             ObjectToInt<? super K> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        /** Returns the reduction accumulated so far (final once complete). */
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectToInt<? super K> transformer;
            final IntByIntToInt reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Fork right halves of the remaining range while budget remains;
                // batch >>>= 1 and baseLimit = h update this task in place.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Map/reduce over this task's own (left) portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key));
                result = r;
                // Merge subtask results up the completion tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysToIntTask<K,V>
                        t = (MapReduceKeysToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5821    
    /**
     * ForkJoin task that maps each map VALUE to an {@code int} via
     * {@code transformer} and combines results with {@code reducer},
     * starting from the identity {@code basis}. Does nothing if either
     * function is null.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<? super V> transformer;  // value -> int
        final IntByIntToInt reducer;               // combines two ints
        final int basis;                           // reduction identity
        int result;                                // accumulated result
        MapReduceValuesToIntTask<K,V> rights, nextRight; // forked subtask list
        MapReduceValuesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToIntTask<K,V> nextRight,
             ObjectToInt<? super V> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        /** Returns the reduction accumulated so far (final once complete). */
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectToInt<? super V> transformer;
            final IntByIntToInt reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Fork right halves of the remaining range while budget remains;
                // batch >>>= 1 and baseLimit = h update this task in place.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Map/reduce over this task's own (left) portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.val));
                result = r;
                // Merge subtask results up the completion tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesToIntTask<K,V>
                        t = (MapReduceValuesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5870    
    /**
     * ForkJoin task that maps each map ENTRY to an {@code int} via
     * {@code transformer} and combines results with {@code reducer},
     * starting from the identity {@code basis}. Does nothing if either
     * function is null.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<Map.Entry<K,V>> transformer; // entry -> int
        final IntByIntToInt reducer;                   // combines two ints
        final int basis;                               // reduction identity
        int result;                                    // accumulated result
        MapReduceEntriesToIntTask<K,V> rights, nextRight; // forked subtask list
        MapReduceEntriesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToIntTask<K,V> nextRight,
             ObjectToInt<Map.Entry<K,V>> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        /** Returns the reduction accumulated so far (final once complete). */
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectToInt<Map.Entry<K,V>> transformer;
            final IntByIntToInt reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Fork right halves of the remaining range while budget remains;
                // batch >>>= 1 and baseLimit = h update this task in place.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Map/reduce over this task's own (left) portion; the node
                // itself serves as the Map.Entry passed to the transformer.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p));
                result = r;
                // Merge subtask results up the completion tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesToIntTask<K,V>
                        t = (MapReduceEntriesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5919    
    /**
     * ForkJoin task that maps each (key, value) pair to an {@code int} via
     * {@code transformer} and combines results with {@code reducer},
     * starting from the identity {@code basis}. Does nothing if either
     * function is null.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectByObjectToInt<? super K, ? super V> transformer; // (k,v) -> int
        final IntByIntToInt reducer;               // combines two ints
        final int basis;                           // reduction identity
        int result;                                // accumulated result
        MapReduceMappingsToIntTask<K,V> rights, nextRight; // forked subtask list
        MapReduceMappingsToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToIntTask<K,V> nextRight,
             ObjectByObjectToInt<? super K, ? super V> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        /** Returns the reduction accumulated so far (final once complete). */
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectByObjectToInt<? super K, ? super V> transformer;
            final IntByIntToInt reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Fork right halves of the remaining range while budget remains;
                // batch >>>= 1 and baseLimit = h update this task in place.
                for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Map/reduce over this task's own (left) portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key, p.val));
                result = r;
                // Merge subtask results up the completion tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsToIntTask<K,V>
                        t = (MapReduceMappingsToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5968    
5969 dl 1.102 /* ---------------- Counters -------------- */
5970    
5971     // Adapted from LongAdder and Striped64.
5972     // See their internal docs for explanation.
5973    
    // A padded cell for distributing counts.
    // p0..p6 / q0..q6 pad {@code value} on both sides; per the adjacent
    // "Adapted from LongAdder and Striped64" note, this spacing keeps
    // independently-updated cells from contending on the same cache line.
    static final class CounterCell {
        volatile long p0, p1, p2, p3, p4, p5, p6;
        volatile long value;   // the per-cell running count
        volatile long q0, q1, q2, q3, q4, q5, q6;
        CounterCell(long x) { value = x; }
    }
5981    
    /**
     * Holder for the thread-local hash code determining which
     * CounterCell to use. The code is initialized via the
     * counterHashCodeGenerator, but may be moved upon collisions
     * (see fullAddCount, which rehashes and stores the new code back).
     */
    static final class CounterHashCode {
        int code;  // this thread's current probe value; 0 means uninitialized
    }
5990    
    /**
     * Generates initial value for per-thread CounterHashCodes.
     * Successive seeds differ by SEED_INCREMENT to spread threads
     * across counter cells.
     */
    static final AtomicInteger counterHashCodeGenerator = new AtomicInteger();

    /**
     * Increment for counterHashCodeGenerator. See class ThreadLocal
     * for explanation.
     */
    static final int SEED_INCREMENT = 0x61c88647;

    /**
     * Per-thread counter hash codes. Shared across all instances
     * (static), so a thread uses one probe value for every map.
     */
    static final ThreadLocal<CounterHashCode> threadCounterHashCode =
        new ThreadLocal<CounterHashCode>();
6007    
6008    
6009     final long sumCount() {
6010     CounterCell[] as = counterCells; CounterCell a;
6011     long sum = baseCount;
6012     if (as != null) {
6013     for (int i = 0; i < as.length; ++i) {
6014     if ((a = as[i]) != null)
6015     sum += a.value;
6016     }
6017     }
6018     return sum;
6019     }
6020    
    // See LongAdder version for explanation
    /**
     * Adds {@code x} to the striped count under contention, creating,
     * initializing, or doubling the CounterCell table as needed.
     * Adapted from Striped64/LongAdder (see the note above these classes).
     *
     * @param x the value to add
     * @param hc this thread's hash-code holder, or null if not yet created
     * @param wasUncontended false if the caller's CAS on a cell just failed
     */
    private final void fullAddCount(long x, CounterHashCode hc,
                                    boolean wasUncontended) {
        int h;
        if (hc == null) {
            // First use by this thread: create and register its probe code.
            hc = new CounterHashCode();
            int s = counterHashCodeGenerator.addAndGet(SEED_INCREMENT);
            h = hc.code = (s == 0) ? 1 : s; // Avoid zero
            threadCounterHashCode.set(hc);
        }
        else
            h = hc.code;
        boolean collide = false;                // True if last slot nonempty
        for (;;) {
            CounterCell[] as; CounterCell a; int n; long v;
            if ((as = counterCells) != null && (n = as.length) > 0) {
                // Table exists: probe the slot indexed by this thread's hash.
                if ((a = as[(n - 1) & h]) == null) {
                    if (cellsBusy == 0) {            // Try to attach new Cell
                        CounterCell r = new CounterCell(x); // Optimistic create
                        if (cellsBusy == 0 &&
                            U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                            boolean created = false;
                            try {               // Recheck under lock
                                CounterCell[] rs; int m, j;
                                if ((rs = counterCells) != null &&
                                    (m = rs.length) > 0 &&
                                    rs[j = (m - 1) & h] == null) {
                                    rs[j] = r;
                                    created = true;
                                }
                            } finally {
                                cellsBusy = 0;  // release spinlock
                            }
                            if (created)
                                break;
                            continue;           // Slot is now non-empty
                        }
                    }
                    collide = false;
                }
                else if (!wasUncontended)       // CAS already known to fail
                    wasUncontended = true;      // Continue after rehash
                else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
                    break;                      // Added to existing cell
                else if (counterCells != as || n >= NCPU)
                    collide = false;            // At max size or stale
                else if (!collide)
                    collide = true;             // Allow one more miss first
                else if (cellsBusy == 0 &&
                         U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                    try {
                        if (counterCells == as) {// Expand table unless stale
                            CounterCell[] rs = new CounterCell[n << 1];
                            for (int i = 0; i < n; ++i)
                                rs[i] = as[i];
                            counterCells = rs;
                        }
                    } finally {
                        cellsBusy = 0;
                    }
                    collide = false;
                    continue;                   // Retry with expanded table
                }
                // XorShift rehash to try a different cell next iteration.
                h ^= h << 13;                   // Rehash
                h ^= h >>> 17;
                h ^= h << 5;
            }
            else if (cellsBusy == 0 && counterCells == as &&
                     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                boolean init = false;
                try {                           // Initialize table
                    if (counterCells == as) {
                        CounterCell[] rs = new CounterCell[2];
                        rs[h & 1] = new CounterCell(x);
                        counterCells = rs;
                        init = true;
                    }
                } finally {
                    cellsBusy = 0;
                }
                if (init)
                    break;
            }
            else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
                break;                          // Fall back on using base
        }
        hc.code = h;                            // Record index for next time
    }
6109    
    // Unsafe mechanics
    private static final sun.misc.Unsafe U;           // intrinsics access
    private static final long SIZECTL;                // offset of sizeCtl
    private static final long TRANSFERINDEX;          // offset of transferIndex
    private static final long TRANSFERORIGIN;         // offset of transferOrigin
    private static final long BASECOUNT;              // offset of baseCount
    private static final long CELLSBUSY;              // offset of cellsBusy
    private static final long CELLVALUE;              // offset of CounterCell.value
    private static final long ABASE;                  // Node[] element base offset
    private static final int ASHIFT;                  // Node[] element shift (log2 scale)
6120    
    // Resolve Unsafe and all field/array offsets once at class load;
    // any failure is unrecoverable, so it is rethrown as an Error.
    static {
        try {
            U = getUnsafe();
            Class<?> k = ConcurrentHashMapV8.class;
            SIZECTL = U.objectFieldOffset
                (k.getDeclaredField("sizeCtl"));
            TRANSFERINDEX = U.objectFieldOffset
                (k.getDeclaredField("transferIndex"));
            TRANSFERORIGIN = U.objectFieldOffset
                (k.getDeclaredField("transferOrigin"));
            BASECOUNT = U.objectFieldOffset
                (k.getDeclaredField("baseCount"));
            CELLSBUSY = U.objectFieldOffset
                (k.getDeclaredField("cellsBusy"));
            Class<?> ck = CounterCell.class;
            CELLVALUE = U.objectFieldOffset
                (ck.getDeclaredField("value"));
            Class<?> ak = Node[].class;
            ABASE = U.arrayBaseOffset(ak);
            int scale = U.arrayIndexScale(ak);
            // ASHIFT only works if the element size is a power of two.
            if ((scale & (scale - 1)) != 0)
                throw new Error("data type scale not a power of two");
            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
        } catch (Exception e) {
            throw new Error(e);
        }
    }
6148    
    /**
     * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
     * Replace with a simple call to Unsafe.getUnsafe when integrating
     * into a jdk.
     *
     * @return a sun.misc.Unsafe
     */
    private static sun.misc.Unsafe getUnsafe() {
        try {
            // Works when this class is on the boot classpath.
            return sun.misc.Unsafe.getUnsafe();
        } catch (SecurityException tryReflectionInstead) {}
        try {
            // Otherwise, reflectively scan Unsafe's static fields for the
            // singleton instance, inside a privileged block.
            return java.security.AccessController.doPrivileged
            (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
                public sun.misc.Unsafe run() throws Exception {
                    Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
                    for (java.lang.reflect.Field f : k.getDeclaredFields()) {
                        f.setAccessible(true);
                        Object x = f.get(null);
                        if (k.isInstance(x))
                            return k.cast(x);
                    }
                    throw new NoSuchFieldError("the Unsafe");
                }});
        } catch (java.security.PrivilegedActionException e) {
            throw new RuntimeException("Could not initialize intrinsics",
                                       e.getCause());
        }
    }
6178 dl 1.1 }