ViewVC Help
View File | Revision Log | Show Annotations | Download File | Root Listing
root/jsr166/jsr166/src/jsr166e/ConcurrentHashMapV8.java
Revision: 1.110
Committed: Thu Jul 4 18:34:49 2013 UTC (10 years, 10 months ago) by dl
Branch: MAIN
Changes since 1.109: +31 -16 lines
Log Message:
Avoid unbounded recursion

File Contents

# User Rev Content
1 dl 1.1 /*
2     * Written by Doug Lea with assistance from members of JCP JSR-166
3     * Expert Group and released to the public domain, as explained at
4     * http://creativecommons.org/publicdomain/zero/1.0/
5     */
6    
7     package jsr166e;
8 dl 1.71
9 dl 1.102 import jsr166e.ForkJoinPool;
10    
11     import java.io.ObjectStreamField;
12     import java.io.Serializable;
13     import java.lang.reflect.ParameterizedType;
14     import java.lang.reflect.Type;
15 dl 1.24 import java.util.Arrays;
16 dl 1.1 import java.util.Collection;
17 dl 1.102 import java.util.Comparator;
18     import java.util.ConcurrentModificationException;
19     import java.util.Enumeration;
20     import java.util.HashMap;
21 dl 1.1 import java.util.Hashtable;
22     import java.util.Iterator;
23 dl 1.102 import java.util.Map;
24 dl 1.1 import java.util.NoSuchElementException;
25 dl 1.102 import java.util.Set;
26 dl 1.1 import java.util.concurrent.ConcurrentMap;
27 dl 1.102 import java.util.concurrent.atomic.AtomicReference;
28 dl 1.82 import java.util.concurrent.atomic.AtomicInteger;
29 dl 1.102 import java.util.concurrent.locks.LockSupport;
30     import java.util.concurrent.locks.ReentrantLock;
31 dl 1.79
32 dl 1.1 /**
33     * A hash table supporting full concurrency of retrievals and
34     * high expected concurrency for updates. This class obeys the
35     * same functional specification as {@link java.util.Hashtable}, and
36     * includes versions of methods corresponding to each method of
37     * {@code Hashtable}. However, even though all operations are
38     * thread-safe, retrieval operations do <em>not</em> entail locking,
39     * and there is <em>not</em> any support for locking the entire table
40     * in a way that prevents all access. This class is fully
41     * interoperable with {@code Hashtable} in programs that rely on its
42     * thread safety but not on its synchronization details.
43     *
44 jsr166 1.78 * <p>Retrieval operations (including {@code get}) generally do not
45 dl 1.1 * block, so may overlap with update operations (including {@code put}
46     * and {@code remove}). Retrievals reflect the results of the most
47     * recently <em>completed</em> update operations holding upon their
48 dl 1.59 * onset. (More formally, an update operation for a given key bears a
49     * <em>happens-before</em> relation with any (non-null) retrieval for
50     * that key reporting the updated value.) For aggregate operations
51     * such as {@code putAll} and {@code clear}, concurrent retrievals may
52     * reflect insertion or removal of only some entries. Similarly,
53     * Iterators and Enumerations return elements reflecting the state of
54     * the hash table at some point at or since the creation of the
55     * iterator/enumeration. They do <em>not</em> throw {@link
56     * ConcurrentModificationException}. However, iterators are designed
57     * to be used by only one thread at a time. Bear in mind that the
58     * results of aggregate status methods including {@code size}, {@code
59     * isEmpty}, and {@code containsValue} are typically useful only when
60     * a map is not undergoing concurrent updates in other threads.
61     * Otherwise the results of these methods reflect transient states
62     * that may be adequate for monitoring or estimation purposes, but not
63     * for program control.
64 dl 1.1 *
65 jsr166 1.78 * <p>The table is dynamically expanded when there are too many
66 dl 1.16 * collisions (i.e., keys that have distinct hash codes but fall into
67     * the same slot modulo the table size), with the expected average
68 dl 1.24 * effect of maintaining roughly two bins per mapping (corresponding
69     * to a 0.75 load factor threshold for resizing). There may be much
70     * variance around this average as mappings are added and removed, but
71     * overall, this maintains a commonly accepted time/space tradeoff for
72     * hash tables. However, resizing this or any other kind of hash
73     * table may be a relatively slow operation. When possible, it is a
74     * good idea to provide a size estimate as an optional {@code
75 dl 1.16 * initialCapacity} constructor argument. An additional optional
76     * {@code loadFactor} constructor argument provides a further means of
77     * customizing initial table capacity by specifying the table density
78     * to be used in calculating the amount of space to allocate for the
79     * given number of elements. Also, for compatibility with previous
80     * versions of this class, constructors may optionally specify an
81     * expected {@code concurrencyLevel} as an additional hint for
82     * internal sizing. Note that using many keys with exactly the same
83 jsr166 1.31 * {@code hashCode()} is a sure way to slow down performance of any
84 dl 1.102 * hash table. To ameliorate impact, when keys are {@link Comparable},
85     * this class may use comparison order among keys to help break ties.
86 dl 1.1 *
87 jsr166 1.78 * <p>A {@link Set} projection of a ConcurrentHashMapV8 may be created
88 dl 1.70 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
89     * (using {@link #keySet(Object)}) when only keys are of interest, and the
90     * mapped values are (perhaps transiently) not used or all take the
91     * same mapping value.
92     *
93 dl 1.1 * <p>This class and its views and iterators implement all of the
94     * <em>optional</em> methods of the {@link Map} and {@link Iterator}
95     * interfaces.
96     *
97 jsr166 1.78 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
98 dl 1.1 * does <em>not</em> allow {@code null} to be used as a key or value.
99     *
100 dl 1.102 * <p>ConcurrentHashMapV8s support a set of sequential and parallel bulk
101     * operations that are designed
102     * to be safely, and often sensibly, applied even with maps that are
103     * being concurrently updated by other threads; for example, when
104     * computing a snapshot summary of the values in a shared registry.
105     * There are three kinds of operation, each with four forms, accepting
106     * functions with Keys, Values, Entries, and (Key, Value) arguments
107     * and/or return values. Because the elements of a ConcurrentHashMapV8
108     * are not ordered in any particular way, and may be processed in
109     * different orders in different parallel executions, the correctness
110     * of supplied functions should not depend on any ordering, or on any
111     * other objects or values that may transiently change while
112     * computation is in progress; and except for forEach actions, should
113     * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry}
114     * objects do not support method {@code setValue}.
115 dl 1.70 *
116     * <ul>
117     * <li> forEach: Perform a given action on each element.
118     * A variant form applies a given transformation on each element
119     * before performing the action.</li>
120     *
121     * <li> search: Return the first available non-null result of
122     * applying a given function on each element; skipping further
123     * search when a result is found.</li>
124     *
125     * <li> reduce: Accumulate each element. The supplied reduction
126     * function cannot rely on ordering (more formally, it should be
127     * both associative and commutative). There are five variants:
128     *
129     * <ul>
130     *
131     * <li> Plain reductions. (There is not a form of this method for
132     * (key, value) function arguments since there is no corresponding
133     * return type.)</li>
134     *
135     * <li> Mapped reductions that accumulate the results of a given
136     * function applied to each element.</li>
137     *
138     * <li> Reductions to scalar doubles, longs, and ints, using a
139     * given basis value.</li>
140     *
141 dl 1.102 * </ul>
142 dl 1.70 * </li>
143     * </ul>
144 dl 1.102 *
145     * <p>These bulk operations accept a {@code parallelismThreshold}
146     * argument. Methods proceed sequentially if the current map size is
147     * estimated to be less than the given threshold. Using a value of
148     * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value
149     * of {@code 1} results in maximal parallelism by partitioning into
150     * enough subtasks to fully utilize the {@link
151     * ForkJoinPool#commonPool()} that is used for all parallel
152     * computations. Normally, you would initially choose one of these
153     * extreme values, and then measure performance of using in-between
154     * values that trade off overhead versus throughput.
155 dl 1.70 *
156     * <p>The concurrency properties of bulk operations follow
157     * from those of ConcurrentHashMapV8: Any non-null result returned
158     * from {@code get(key)} and related access methods bears a
159     * happens-before relation with the associated insertion or
160     * update. The result of any bulk operation reflects the
161     * composition of these per-element relations (but is not
162     * necessarily atomic with respect to the map as a whole unless it
163     * is somehow known to be quiescent). Conversely, because keys
164     * and values in the map are never null, null serves as a reliable
165     * atomic indicator of the current lack of any result. To
166     * maintain this property, null serves as an implicit basis for
167     * all non-scalar reduction operations. For the double, long, and
168     * int versions, the basis should be one that, when combined with
169     * any other value, returns that other value (more formally, it
170     * should be the identity element for the reduction). Most common
171     * reductions have these properties; for example, computing a sum
172     * with basis 0 or a minimum with basis MAX_VALUE.
173     *
174     * <p>Search and transformation functions provided as arguments
175     * should similarly return null to indicate the lack of any result
176     * (in which case it is not used). In the case of mapped
177     * reductions, this also enables transformations to serve as
178     * filters, returning null (or, in the case of primitive
179     * specializations, the identity basis) if the element should not
180     * be combined. You can create compound transformations and
181     * filterings by composing them yourself under this "null means
182     * there is nothing there now" rule before using them in search or
183     * reduce operations.
184     *
185     * <p>Methods accepting and/or returning Entry arguments maintain
186     * key-value associations. They may be useful for example when
187     * finding the key for the greatest value. Note that "plain" Entry
188     * arguments can be supplied using {@code new
189     * AbstractMap.SimpleEntry(k,v)}.
190     *
191 jsr166 1.78 * <p>Bulk operations may complete abruptly, throwing an
192 dl 1.70 * exception encountered in the application of a supplied
193     * function. Bear in mind when handling such exceptions that other
194     * concurrently executing functions could also have thrown
195     * exceptions, or would have done so if the first exception had
196     * not occurred.
197     *
198 dl 1.84 * <p>Speedups for parallel compared to sequential forms are common
199     * but not guaranteed. Parallel operations involving brief functions
200     * on small maps may execute more slowly than sequential forms if the
201     * underlying work to parallelize the computation is more expensive
202     * than the computation itself. Similarly, parallelization may not
203     * lead to much actual parallelism if all processors are busy
204     * performing unrelated tasks.
205 dl 1.70 *
206 jsr166 1.78 * <p>All arguments to all task methods must be non-null.
207 dl 1.70 *
208     * <p><em>jsr166e note: During transition, this class
209     * uses nested functional interfaces with different names but the
210 jsr166 1.77 * same forms as those expected for JDK8.</em>
211 dl 1.70 *
212 dl 1.1 * <p>This class is a member of the
213     * <a href="{@docRoot}/../technotes/guides/collections/index.html">
214     * Java Collections Framework</a>.
215     *
216 jsr166 1.22 * @since 1.5
217 dl 1.1 * @author Doug Lea
218     * @param <K> the type of keys maintained by this map
219     * @param <V> the type of mapped values
220     */
221 jsr166 1.99 public class ConcurrentHashMapV8<K,V>
222     implements ConcurrentMap<K,V>, Serializable {
223 dl 1.1 private static final long serialVersionUID = 7249069246763182397L;
224    
225     /**
226 dl 1.102 * An object for traversing and partitioning elements of a source.
227     * This interface provides a subset of the functionality of JDK8
228     * java.util.Spliterator.
229     */
230     public static interface ConcurrentHashMapSpliterator<T> {
231 jsr166 1.103 /**
232 dl 1.102 * If possible, returns a new spliterator covering
233     * approximately one half of the elements, which will not be
234     * covered by this spliterator. Returns null if this
235     * spliterator cannot be split.
236     */
237     ConcurrentHashMapSpliterator<T> trySplit();
238     /**
239     * Returns an estimate of the number of elements covered by
240     * this Spliterator.
241     */
242     long estimateSize();
243    
244     /** Applies the action to each untraversed element. */
245     void forEachRemaining(Action<? super T> action);
246     /** If an element remains, applies the action and returns true. */
247     boolean tryAdvance(Action<? super T> action);
248 dl 1.41 }
249    
250 dl 1.102 // Sams (single-abstract-method interfaces; transitional stand-ins for JDK8 java.util.function types)
251     /** Interface describing a void action of one argument */
252     public interface Action<A> { void apply(A a); }
253     /** Interface describing a void action of two arguments */
254     public interface BiAction<A,B> { void apply(A a, B b); }
255     /** Interface describing a function of one argument */
256     public interface Fun<A,T> { T apply(A a); }
257     /** Interface describing a function of two arguments */
258     public interface BiFun<A,B,T> { T apply(A a, B b); }
259     /** Interface describing a function mapping its argument to a double */
260     public interface ObjectToDouble<A> { double apply(A a); }
261     /** Interface describing a function mapping its argument to a long */
262     public interface ObjectToLong<A> { long apply(A a); }
263     /** Interface describing a function mapping its argument to an int */
264     public interface ObjectToInt<A> {int apply(A a); }
265     /** Interface describing a function mapping two arguments to a double */
266     public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
267     /** Interface describing a function mapping two arguments to a long */
268     public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
269     /** Interface describing a function mapping two arguments to an int */
270     public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); }
271     /** Interface describing a function mapping two doubles to a double */
272     public interface DoubleByDoubleToDouble { double apply(double a, double b); }
273     /** Interface describing a function mapping two longs to a long */
274     public interface LongByLongToLong { long apply(long a, long b); }
275     /** Interface describing a function mapping two ints to an int */
276     public interface IntByIntToInt { int apply(int a, int b); }
277    
278 dl 1.1 /*
279     * Overview:
280     *
281     * The primary design goal of this hash table is to maintain
282     * concurrent readability (typically method get(), but also
283     * iterators and related methods) while minimizing update
284 dl 1.24 * contention. Secondary goals are to keep space consumption about
285     * the same or better than java.util.HashMap, and to support high
286     * initial insertion rates on an empty table by many threads.
287 dl 1.1 *
288 dl 1.102 * This map usually acts as a binned (bucketed) hash table. Each
289     * key-value mapping is held in a Node. Most nodes are instances
290     * of the basic Node class with hash, key, value, and next
291     * fields. However, various subclasses exist: TreeNodes are
292     * arranged in balanced trees, not lists. TreeBins hold the roots
293     * of sets of TreeNodes. ForwardingNodes are placed at the heads
294     * of bins during resizing. ReservationNodes are used as
295     * placeholders while establishing values in computeIfAbsent and
296     * related methods. The types TreeBin, ForwardingNode, and
297     * ReservationNode do not hold normal user keys, values, or
298     * hashes, and are readily distinguishable during search etc
299     * because they have negative hash fields and null key and value
300     * fields. (These special nodes are either uncommon or transient,
301     * so the impact of carrying around some unused fields is
302 jsr166 1.108 * insignificant.)
303 dl 1.1 *
304     * The table is lazily initialized to a power-of-two size upon the
305 dl 1.38 * first insertion. Each bin in the table normally contains a
306     * list of Nodes (most often, the list has only zero or one Node).
307     * Table accesses require volatile/atomic reads, writes, and
308     * CASes. Because there is no other way to arrange this without
309     * adding further indirections, we use intrinsics
310 dl 1.102 * (sun.misc.Unsafe) operations.
311 dl 1.24 *
312 dl 1.82 * We use the top (sign) bit of Node hash fields for control
313     * purposes -- it is available anyway because of addressing
314 dl 1.102 * constraints. Nodes with negative hash fields are specially
315     * handled or ignored in map methods.
316 dl 1.14 *
317 dl 1.27 * Insertion (via put or its variants) of the first node in an
318 dl 1.14 * empty bin is performed by just CASing it to the bin. This is
319 dl 1.38 * by far the most common case for put operations under most
320     * key/hash distributions. Other update operations (insert,
321     * delete, and replace) require locks. We do not want to waste
322     * the space required to associate a distinct lock object with
323     * each bin, so instead use the first node of a bin list itself as
324 dl 1.82 * a lock. Locking support for these locks relies on builtin
325     * "synchronized" monitors.
326 dl 1.24 *
327     * Using the first node of a list as a lock does not by itself
328     * suffice though: When a node is locked, any update must first
329     * validate that it is still the first node after locking it, and
330     * retry if not. Because new nodes are always appended to lists,
331     * once a node is first in a bin, it remains first until deleted
332 dl 1.102 * or the bin becomes invalidated (upon resizing).
333 dl 1.14 *
334 dl 1.24 * The main disadvantage of per-bin locks is that other update
335 dl 1.14 * operations on other nodes in a bin list protected by the same
336     * lock can stall, for example when user equals() or mapping
337 dl 1.38 * functions take a long time. However, statistically, under
338     * random hash codes, this is not a common problem. Ideally, the
339     * frequency of nodes in bins follows a Poisson distribution
340 dl 1.14 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
341 dl 1.16 * parameter of about 0.5 on average, given the resizing threshold
342     * of 0.75, although with a large variance because of resizing
343     * granularity. Ignoring variance, the expected occurrences of
344     * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
345 dl 1.38 * first values are:
346 dl 1.16 *
347 dl 1.38 * 0: 0.60653066
348     * 1: 0.30326533
349     * 2: 0.07581633
350     * 3: 0.01263606
351     * 4: 0.00157952
352     * 5: 0.00015795
353     * 6: 0.00001316
354     * 7: 0.00000094
355     * 8: 0.00000006
356     * more: less than 1 in ten million
357 dl 1.16 *
358     * Lock contention probability for two threads accessing distinct
359 dl 1.38 * elements is roughly 1 / (8 * #elements) under random hashes.
360     *
361     * Actual hash code distributions encountered in practice
362     * sometimes deviate significantly from uniform randomness. This
363     * includes the case when N > (1<<30), so some keys MUST collide.
364     * Similarly for dumb or hostile usages in which multiple keys are
365 dl 1.102 * designed to have identical hash codes or ones that differs only
366     * in masked-out high bits. So we use a secondary strategy that
367     * applies when the number of nodes in a bin exceeds a
368     * threshold. These TreeBins use a balanced tree to hold nodes (a
369     * specialized form of red-black trees), bounding search time to
370     * O(log N). Each search step in a TreeBin is at least twice as
371 dl 1.38 * slow as in a regular list, but given that N cannot exceed
372     * (1<<64) (before running out of addresses) this bounds search
373     * steps, lock hold times, etc, to reasonable constants (roughly
374     * 100 nodes inspected per operation worst case) so long as keys
375     * are Comparable (which is very common -- String, Long, etc).
376     * TreeBin nodes (TreeNodes) also maintain the same "next"
377     * traversal pointers as regular nodes, so can be traversed in
378     * iterators in the same way.
379 dl 1.1 *
380 dl 1.38 * The table is resized when occupancy exceeds a percentage
381 dl 1.82 * threshold (nominally, 0.75, but see below). Any thread
382     * noticing an overfull bin may assist in resizing after the
383     * initiating thread allocates and sets up the replacement
384     * array. However, rather than stalling, these other threads may
385     * proceed with insertions etc. The use of TreeBins shields us
386     * from the worst case effects of overfilling while resizes are in
387     * progress. Resizing proceeds by transferring bins, one by one,
388     * from the table to the next table. To enable concurrency, the
389     * next table must be (incrementally) prefilled with place-holders
390     * serving as reverse forwarders to the old table. Because we are
391     * using power-of-two expansion, the elements from each bin must
392     * either stay at same index, or move with a power of two
393     * offset. We eliminate unnecessary node creation by catching
394     * cases where old nodes can be reused because their next fields
395     * won't change. On average, only about one-sixth of them need
396     * cloning when a table doubles. The nodes they replace will be
397     * garbage collectable as soon as they are no longer referenced by
398     * any reader thread that may be in the midst of concurrently
399     * traversing table. Upon transfer, the old table bin contains
400     * only a special forwarding node (with hash field "MOVED") that
401     * contains the next table as its key. On encountering a
402     * forwarding node, access and update operations restart, using
403     * the new table.
404     *
405     * Each bin transfer requires its bin lock, which can stall
406     * waiting for locks while resizing. However, because other
407     * threads can join in and help resize rather than contend for
408     * locks, average aggregate waits become shorter as resizing
409     * progresses. The transfer operation must also ensure that all
410     * accessible bins in both the old and new table are usable by any
411     * traversal. This is arranged by proceeding from the last bin
412     * (table.length - 1) up towards the first. Upon seeing a
413     * forwarding node, traversals (see class Traverser) arrange to
414     * move to the new table without revisiting nodes. However, to
415     * ensure that no intervening nodes are skipped, bin splitting can
416     * only begin after the associated reverse-forwarders are in
417     * place.
418 dl 1.14 *
419 dl 1.24 * The traversal scheme also applies to partial traversals of
420 dl 1.52 * ranges of bins (via an alternate Traverser constructor)
421 dl 1.41 * to support partitioned aggregate operations. Also, read-only
422     * operations give up if ever forwarded to a null table, which
423     * provides support for shutdown-style clearing, which is also not
424     * currently implemented.
425 dl 1.14 *
426     * Lazy table initialization minimizes footprint until first use,
427     * and also avoids resizings when the first operation is from a
428     * putAll, constructor with map argument, or deserialization.
429 dl 1.24 * These cases attempt to override the initial capacity settings,
430     * but harmlessly fail to take effect in cases of races.
431 dl 1.1 *
432 dl 1.82 * The element count is maintained using a specialization of
433     * LongAdder. We need to incorporate a specialization rather than
434     * just use a LongAdder in order to access implicit
435     * contention-sensing that leads to creation of multiple
436     * CounterCells. The counter mechanics avoid contention on
437     * updates but can encounter cache thrashing if read too
438     * frequently during concurrent access. To avoid reading so often,
439     * resizing under contention is attempted only upon adding to a
440     * bin already holding two or more nodes. Under uniform hash
441     * distributions, the probability of this occurring at threshold
442     * is around 13%, meaning that only about 1 in 8 puts check
443 dl 1.102 * threshold (and after resizing, many fewer do so).
444     *
445     * TreeBins use a special form of comparison for search and
446     * related operations (which is the main reason we cannot use
447     * existing collections such as TreeMaps). TreeBins contain
448     * Comparable elements, but may contain others, as well as
449     * elements that are Comparable but not necessarily Comparable
450     * for the same T, so we cannot invoke compareTo among them. To
451     * handle this, the tree is ordered primarily by hash value, then
452     * by Comparable.compareTo order if applicable. On lookup at a
453     * node, if elements are not comparable or compare as 0 then both
454     * left and right children may need to be searched in the case of
455     * tied hash values. (This corresponds to the full list search
456     * that would be necessary if all elements were non-Comparable and
457     * had tied hashes.) The red-black balancing code is updated from
458     * pre-jdk-collections
459     * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
460     * based in turn on Cormen, Leiserson, and Rivest "Introduction to
461     * Algorithms" (CLR).
462     *
463     * TreeBins also require an additional locking mechanism. While
464     * list traversal is always possible by readers even during
465 jsr166 1.108 * updates, tree traversal is not, mainly because of tree-rotations
466 dl 1.102 * that may change the root node and/or its linkages. TreeBins
467     * include a simple read-write lock mechanism parasitic on the
468     * main bin-synchronization strategy: Structural adjustments
469     * associated with an insertion or removal are already bin-locked
470     * (and so cannot conflict with other writers) but must wait for
471     * ongoing readers to finish. Since there can be only one such
472     * waiter, we use a simple scheme using a single "waiter" field to
473     * block writers. However, readers need never block. If the root
474     * lock is held, they proceed along the slow traversal path (via
475     * next-pointers) until the lock becomes available or the list is
476     * exhausted, whichever comes first. These cases are not fast, but
477     * maximize aggregate expected throughput.
478 dl 1.14 *
479     * Maintaining API and serialization compatibility with previous
480     * versions of this class introduces several oddities. Mainly: We
481     * leave untouched but unused constructor arguments referring to
482 dl 1.24 * concurrencyLevel. We accept a loadFactor constructor argument,
483     * but apply it only to initial table capacity (which is the only
484     * time that we can guarantee to honor it.) We also declare an
485     * unused "Segment" class that is instantiated in minimal form
486     * only when serializing.
487 dl 1.102 *
488     * This file is organized to make things a little easier to follow
489     * while reading than they might otherwise: First the main static
490     * declarations and utilities, then fields, then main public
491     * methods (with a few factorings of multiple public methods into
492     * internal ones), then sizing methods, trees, traversers, and
493     * bulk operations.
494 dl 1.1 */
495    
496     /* ---------------- Constants -------------- */
497    
498     /**
499 dl 1.16 * The largest possible table capacity. This value must be
500     * exactly 1<<30 to stay within Java array allocation and indexing
501 dl 1.24 * bounds for power of two table sizes, and is further required
502     * because the top two bits of 32-bit hash fields are used for
503     * control purposes.
504 dl 1.1 */
505 dl 1.14 private static final int MAXIMUM_CAPACITY = 1 << 30;
506 dl 1.1
507     /**
508 dl 1.14 * The default initial table capacity. Must be a power of 2
509     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
510 dl 1.1 */
511 dl 1.14 private static final int DEFAULT_CAPACITY = 16;
512 dl 1.1
513     /**
514 dl 1.24 * The largest possible (non-power of two) array size.
515     * Needed by toArray and related methods.
516     */
517     static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
518    
519     /**
520     * The default concurrency level for this table. Unused but
521     * defined for compatibility with previous versions of this class.
522     */
523     private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
524    
525     /**
526 dl 1.16 * The load factor for this table. Overrides of this value in
527     * constructors affect only the initial table capacity. The
528 dl 1.24 * actual floating point value isn't normally used -- it is
529     * simpler to use expressions such as {@code n - (n >>> 2)} for
530     * the associated resizing threshold.
531 dl 1.1 */
532 dl 1.16 private static final float LOAD_FACTOR = 0.75f;
533 dl 1.1
534     /**
535 dl 1.38 * The bin count threshold for using a tree rather than list for a
536 dl 1.102 * bin. Bins are converted to trees when adding an element to a
537     * bin with at least this many nodes. The value must be greater
538     * than 2, and should be at least 8 to mesh with assumptions in
539     * tree removal about conversion back to plain bins upon
540     * shrinkage.
541     */
542     static final int TREEIFY_THRESHOLD = 8;
543    
544     /**
545     * The bin count threshold for untreeifying a (split) bin during a
546     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
547     * most 6 to mesh with shrinkage detection under removal.
548     */
549     static final int UNTREEIFY_THRESHOLD = 6;
550    
551     /**
552     * The smallest table capacity for which bins may be treeified.
553     * (Otherwise the table is resized if too many nodes in a bin.)
554     * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
555     * conflicts between resizing and treeification thresholds.
556 dl 1.38 */
557 dl 1.102 static final int MIN_TREEIFY_CAPACITY = 64;
558 dl 1.38
559 dl 1.82 /**
560     * Minimum number of rebinnings per transfer step. Ranges are
561     * subdivided to allow multiple resizer threads. This value
562     * serves as a lower bound to avoid resizers encountering
563     * excessive memory contention. The value should be at least
564     * DEFAULT_CAPACITY.
565     */
566     private static final int MIN_TRANSFER_STRIDE = 16;
567    
568 dl 1.24 /*
569 dl 1.82 * Encodings for Node hash fields. See above for explanation.
570 dl 1.1 */
571 dl 1.109 static final int MOVED = -1; // hash for forwarding nodes
572     static final int TREEBIN = -2; // hash for roots of trees
573     static final int RESERVED = -3; // hash for transient reservations
574 dl 1.82 static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
575    
576     /** Number of CPUs, to place bounds on some sizings. */
577     static final int NCPU = Runtime.getRuntime().availableProcessors();
578    
579 dl 1.102 /** For serialization compatibility. */
580     private static final ObjectStreamField[] serialPersistentFields = {
581     new ObjectStreamField("segments", Segment[].class),
582     new ObjectStreamField("segmentMask", Integer.TYPE),
583     new ObjectStreamField("segmentShift", Integer.TYPE)
584     };
585    
    /* ---------------- Nodes -------------- */

    /**
     * Key-value entry. This class is never exported out as a
     * user-mutable Map.Entry (i.e., one supporting setValue; see
     * MapEntry below), but can be used for read-only traversals used
     * in bulk tasks. Subclasses of Node with a negative hash field
     * are special, and contain null keys and values (but are never
     * exported). Otherwise, keys and vals are never null.
     */
    static class Node<K,V> implements Map.Entry<K,V> {
        final int hash;          // spread hash; negative only in special subclasses
        final K key;
        volatile V val;          // volatile: read without locks by get()
        volatile Node<K,V> next; // volatile: list links traversed without locks

        Node(int hash, K key, V val, Node<K,V> next) {
            this.hash = hash;
            this.key = key;
            this.val = val;
            this.next = next;
        }

        public final K getKey()       { return key; }
        public final V getValue()     { return val; }
        public final int hashCode()   { return key.hashCode() ^ val.hashCode(); }
        public final String toString(){ return key + "=" + val; }
        // Mutation is unsupported here; exported entries are MapEntry objects.
        public final V setValue(V value) {
            throw new UnsupportedOperationException();
        }

        public final boolean equals(Object o) {
            Object k, v, u; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    (k == key || k.equals(key)) &&
                    (v == (u = val) || v.equals(u)));
        }

        /**
         * Virtualized support for map.get(); overridden in subclasses.
         */
        Node<K,V> find(int h, Object k) {
            Node<K,V> e = this;
            if (k != null) {
                do {
                    K ek;
                    if (e.hash == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                } while ((e = e.next) != null);
            }
            return null;
        }
    }

643 dl 1.102 /* ---------------- Static utilities -------------- */
644    
645 dl 1.82 /**
646 dl 1.102 * Spreads (XORs) higher bits of hash to lower and also forces top
647     * bit to 0. Because the table uses power-of-two masking, sets of
648     * hashes that vary only in bits above the current mask will
649     * always collide. (Among known examples are sets of Float keys
650     * holding consecutive whole numbers in small tables.) So we
651     * apply a transform that spreads the impact of higher bits
652     * downward. There is a tradeoff between speed, utility, and
653     * quality of bit-spreading. Because many common sets of hashes
654     * are already reasonably distributed (so don't benefit from
655     * spreading), and because we use trees to handle large sets of
656     * collisions in bins, we just XOR some shifted bits in the
657     * cheapest possible way to reduce systematic lossage, as well as
658     * to incorporate impact of the highest bits that would otherwise
659     * never be used in index calculations because of table bounds.
660 dl 1.82 */
661 dl 1.102 static final int spread(int h) {
662     return (h ^ (h >>> 16)) & HASH_BITS;
663 dl 1.82 }
664    
665     /**
666 dl 1.102 * Returns a power of two table size for the given desired capacity.
667     * See Hackers Delight, sec 3.2
668 dl 1.82 */
669 dl 1.102 private static final int tableSizeFor(int c) {
670     int n = c - 1;
671     n |= n >>> 1;
672     n |= n >>> 2;
673     n |= n >>> 4;
674     n |= n >>> 8;
675     n |= n >>> 16;
676     return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
677     }
678 dl 1.82
679     /**
680 dl 1.102 * Returns x's Class if it is of the form "class C implements
681     * Comparable<C>", else null.
682 dl 1.82 */
683 dl 1.102 static Class<?> comparableClassFor(Object x) {
684     if (x instanceof Comparable) {
685     Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
686     if ((c = x.getClass()) == String.class) // bypass checks
687     return c;
688     if ((ts = c.getGenericInterfaces()) != null) {
689     for (int i = 0; i < ts.length; ++i) {
690     if (((t = ts[i]) instanceof ParameterizedType) &&
691     ((p = (ParameterizedType)t).getRawType() ==
692     Comparable.class) &&
693     (as = p.getActualTypeArguments()) != null &&
694     as.length == 1 && as[0] == c) // type arg is c
695     return c;
696     }
697     }
698     }
699     return null;
700     }
701 dl 1.82
702     /**
703 dl 1.102 * Returns k.compareTo(x) if x matches kc (k's screened comparable
704     * class), else 0.
705 dl 1.82 */
706 dl 1.102 @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
707     static int compareComparables(Class<?> kc, Object k, Object x) {
708     return (x == null || x.getClass() != kc ? 0 :
709     ((Comparable)k).compareTo(x));
710     }
711    
    /* ---------------- Table element access -------------- */

    /*
     * Volatile access methods are used for table elements as well as
     * elements of in-progress next table while resizing. All uses of
     * the tab arguments must be null checked by callers. All callers
     * also paranoically precheck that tab's length is not zero (or an
     * equivalent check), thus ensuring that any index argument taking
     * the form of a hash value anded with (length - 1) is a valid
     * index. Note that, to be correct wrt arbitrary concurrency
     * errors by users, these checks must operate on local variables,
     * which accounts for some odd-looking inline assignments below.
     * Note that calls to setTabAt always occur within locked regions,
     * and so in principle require only release ordering, not full
     * volatile semantics, but are currently coded as volatile writes
     * to be conservative.
     */

    /** Volatile read of tab[i] (array elements are not otherwise volatile). */
    @SuppressWarnings("unchecked")
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
    }

    /** CAS of tab[i] from c to v; used e.g. when inserting into an empty bin. */
    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }

    /** Volatile write of tab[i]; always called within locked regions. */
    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
    }

    /* ---------------- Fields -------------- */

    /**
     * The array of bins. Lazily initialized upon first insertion.
     * Size is always a power of two. Accessed directly by iterators.
     */
    transient volatile Node<K,V>[] table;

    /**
     * The next table to use; non-null only while resizing.
     */
    private transient volatile Node<K,V>[] nextTable;

    /**
     * Base counter value, used mainly when there is no contention,
     * but also as a fallback during table initialization
     * races. Updated via CAS.
     */
    private transient volatile long baseCount;

    /**
     * Table initialization and resizing control. When negative, the
     * table is being initialized or resized: -1 for initialization,
     * else -(1 + the number of active resizing threads). Otherwise,
     * when table is null, holds the initial table size to use upon
     * creation, or 0 for default. After initialization, holds the
     * next element count value upon which to resize the table.
     */
    private transient volatile int sizeCtl;

    /**
     * The next table index (plus one) to split while resizing.
     */
    private transient volatile int transferIndex;

    /**
     * The least available table index to split while resizing.
     */
    private transient volatile int transferOrigin;

    /**
     * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
     */
    private transient volatile int cellsBusy;

    /**
     * Table of counter cells. When non-null, size is a power of 2.
     */
    private transient volatile CounterCell[] counterCells;

    // views; lazily created by keySet(), values(), entrySet()
    private transient KeySetView<K,V> keySet;
    private transient ValuesView<K,V> values;
    private transient EntrySetView<K,V> entrySet;


    /* ---------------- Public operations -------------- */

    /**
     * Creates a new, empty map with the default initial table size (16).
     */
    public ConcurrentHashMapV8() {
        // sizeCtl stays 0, signifying "use default capacity on first insert"
    }

808 dl 1.102 /**
809     * Creates a new, empty map with an initial table size
810     * accommodating the specified number of elements without the need
811     * to dynamically resize.
812     *
813     * @param initialCapacity The implementation performs internal
814     * sizing to accommodate this many elements.
815     * @throws IllegalArgumentException if the initial capacity of
816     * elements is negative
817     */
818     public ConcurrentHashMapV8(int initialCapacity) {
819     if (initialCapacity < 0)
820     throw new IllegalArgumentException();
821     int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
822     MAXIMUM_CAPACITY :
823     tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
824     this.sizeCtl = cap;
825 dl 1.38 }
826    
    /**
     * Creates a new map with the same mappings as the given map.
     *
     * @param m the map
     */
    public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
        this.sizeCtl = DEFAULT_CAPACITY; // start at default; putAll presizes
        putAll(m);
    }

    /**
     * Creates a new, empty map with an initial table size based on
     * the given number of elements ({@code initialCapacity}) and
     * initial table density ({@code loadFactor}).
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements,
     * given the specified load factor.
     * @param loadFactor the load factor (table density) for
     * establishing the initial table size
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative or the load factor is nonpositive
     *
     * @since 1.6
     */
    public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
        // Delegates with concurrencyLevel == 1 (used only as a sizing hint)
        this(initialCapacity, loadFactor, 1);
    }

856     /**
857 dl 1.102 * Creates a new, empty map with an initial table size based on
858     * the given number of elements ({@code initialCapacity}), table
859     * density ({@code loadFactor}), and number of concurrently
860     * updating threads ({@code concurrencyLevel}).
861     *
862     * @param initialCapacity the initial capacity. The implementation
863     * performs internal sizing to accommodate this many elements,
864     * given the specified load factor.
865     * @param loadFactor the load factor (table density) for
866     * establishing the initial table size
867     * @param concurrencyLevel the estimated number of concurrently
868     * updating threads. The implementation may use this value as
869     * a sizing hint.
870     * @throws IllegalArgumentException if the initial capacity is
871     * negative or the load factor or concurrencyLevel are
872     * nonpositive
873 dl 1.1 */
874 dl 1.102 public ConcurrentHashMapV8(int initialCapacity,
875     float loadFactor, int concurrencyLevel) {
876     if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
877     throw new IllegalArgumentException();
878     if (initialCapacity < concurrencyLevel) // Use at least as many bins
879     initialCapacity = concurrencyLevel; // as estimated threads
880     long size = (long)(1.0 + (long)initialCapacity / loadFactor);
881     int cap = (size >= (long)MAXIMUM_CAPACITY) ?
882     MAXIMUM_CAPACITY : tableSizeFor((int)size);
883     this.sizeCtl = cap;
884 dl 1.24 }
885 dl 1.1
886 dl 1.102 // Original (since JDK1.2) Map methods
887 dl 1.38
888     /**
889 dl 1.102 * {@inheritDoc}
890 dl 1.38 */
891 dl 1.102 public int size() {
892     long n = sumCount();
893     return ((n < 0L) ? 0 :
894     (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
895     (int)n);
896 dl 1.38 }
897 dl 1.1
898 dl 1.38 /**
899 dl 1.102 * {@inheritDoc}
900     */
901     public boolean isEmpty() {
902     return sumCount() <= 0L; // ignore transient negative values
903     }
904    
    /**
     * Returns the value to which the specified key is mapped,
     * or {@code null} if this map contains no mapping for the key.
     *
     * <p>More formally, if this map contains a mapping from a key
     * {@code k} to a value {@code v} such that {@code key.equals(k)},
     * then this method returns {@code v}; otherwise it returns
     * {@code null}. (There can be at most one such mapping.)
     *
     * @throws NullPointerException if the specified key is null
     */
    public V get(Object key) {
        Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
        int h = spread(key.hashCode());
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (e = tabAt(tab, (n - 1) & h)) != null) {
            if ((eh = e.hash) == h) {
                // Common case: the first node in the bin is a match
                if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                    return e.val;
            }
            else if (eh < 0)
                // Special node (forwarding node or tree root); delegate lookup
                return (p = e.find(h, key)) != null ? p.val : null;
            // Plain linked bin: scan the remaining list
            while ((e = e.next) != null) {
                if (e.hash == h &&
                    ((ek = e.key) == key || (ek != null && key.equals(ek))))
                    return e.val;
            }
        }
        return null;
    }

936 dl 1.102 /**
937     * Tests if the specified object is a key in this table.
938     *
939     * @param key possible key
940     * @return {@code true} if and only if the specified object
941     * is a key in this table, as determined by the
942     * {@code equals} method; {@code false} otherwise
943     * @throws NullPointerException if the specified key is null
944     */
945     public boolean containsKey(Object key) {
946     return get(key) != null;
947     }
948 dl 1.41
    /**
     * Returns {@code true} if this map maps one or more keys to the
     * specified value. Note: This method may require a full traversal
     * of the map, and is much slower than method {@code containsKey}.
     *
     * @param value value whose presence in this map is to be tested
     * @return {@code true} if this map maps one or more keys to the
     * specified value
     * @throws NullPointerException if the specified value is null
     */
    public boolean containsValue(Object value) {
        if (value == null)
            throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            // Weakly-consistent full-table scan
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V v;
                if ((v = p.val) == value || (v != null && value.equals(v)))
                    return true;
            }
        }
        return false;
    }

    /**
     * Maps the specified key to the specified value in this table.
     * Neither the key nor the value can be null.
     *
     * <p>The value can be retrieved by calling the {@code get} method
     * with a key that is equal to the original key.
     *
     * @param key key with which the specified value is to be associated
     * @param value value to be associated with the specified key
     * @return the previous value associated with {@code key}, or
     * {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key or value is null
     */
    public V put(K key, V value) {
        // onlyIfAbsent == false: always overwrite an existing mapping
        return putVal(key, value, false);
    }

    /**
     * Implementation for put and putIfAbsent.
     *
     * Retries until one of: a CAS publishes a new node into an empty
     * bin; a resize is helped and the loop restarts on the new table;
     * or the bin head is locked and the entry is inserted/updated.
     *
     * @param key the (non-null) key
     * @param value the (non-null) value
     * @param onlyIfAbsent if true, do not overwrite an existing value
     * @return the previous value, or null if there was no mapping
     * @throws NullPointerException if the key or value is null
     */
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        if (key == null || value == null) throw new NullPointerException();
        int hash = spread(key.hashCode());
        int binCount = 0; // nodes seen in the bin; drives treeification
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable(); // lazy table creation
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                if (casTabAt(tab, i, null,
                             new Node<K,V>(hash, key, value, null)))
                    break;                   // no lock when adding to empty bin
            }
            else if ((fh = f.hash) == MOVED)
                // Table is being resized; help, then retry on new table
                tab = helpTransfer(tab, f);
            else {
                V oldVal = null;
                synchronized (f) { // lock the bin head
                    if (tabAt(tab, i) == f) { // recheck: head may have changed
                        if (fh >= 0) { // plain linked bin
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    // Not found: append at tail
                                    pred.next = new Node<K,V>(hash, key,
                                                              value, null);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            Node<K,V> p;
                            binCount = 2; // any value >= 2 suppresses treeify
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                                  value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (oldVal != null)
                        return oldVal; // replaced: size unchanged
                    break;
                }
            }
        }
        addCount(1L, binCount); // new mapping: bump count, maybe resize
        return null;
    }

1056 dl 1.14 /**
1057 dl 1.102 * Copies all of the mappings from the specified map to this one.
1058     * These mappings replace any mappings that this map had for any of the
1059     * keys currently in the specified map.
1060     *
1061     * @param m mappings to be stored in this map
1062 dl 1.14 */
1063 dl 1.102 public void putAll(Map<? extends K, ? extends V> m) {
1064     tryPresize(m.size());
1065     for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1066     putVal(e.getKey(), e.getValue(), false);
1067 dl 1.38 }
1068    
    /**
     * Removes the key (and its corresponding value) from this map.
     * This method does nothing if the key is not in the map.
     *
     * @param key the key that needs to be removed
     * @return the previous value associated with {@code key}, or
     * {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key is null
     */
    public V remove(Object key) {
        // value == null requests deletion; cv == null means unconditional
        return replaceNode(key, null, null);
    }

    /**
     * Implementation for the four public remove/replace methods:
     * Replaces node value with v, conditional upon match of cv if
     * non-null. If resulting value is null, delete.
     *
     * @param key the key to search for (hashed with spread)
     * @param value new value, or null to delete the mapping
     * @param cv expected current value, or null for unconditional
     * @return the previous value if the operation took effect, else null
     */
    final V replaceNode(Object key, V value, Object cv) {
        int hash = spread(key.hashCode());
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0 ||
                (f = tabAt(tab, i = (n - 1) & hash)) == null)
                break; // empty bin: nothing to remove/replace
            else if ((fh = f.hash) == MOVED)
                // Resize in progress; help, then retry on the new table
                tab = helpTransfer(tab, f);
            else {
                V oldVal = null;
                boolean validated = false; // bin examined while stably locked
                synchronized (f) {
                    if (tabAt(tab, i) == f) { // recheck head under lock
                        if (fh >= 0) { // plain linked bin
                            validated = true;
                            for (Node<K,V> e = f, pred = null;;) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    V ev = e.val;
                                    if (cv == null || cv == ev ||
                                        (ev != null && cv.equals(ev))) {
                                        oldVal = ev;
                                        if (value != null)
                                            e.val = value;      // replace
                                        else if (pred != null)
                                            pred.next = e.next; // unlink interior
                                        else
                                            setTabAt(tab, i, e.next); // unlink head
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        else if (f instanceof TreeBin) {
                            validated = true;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(hash, key, null)) != null) {
                                V pv = p.val;
                                if (cv == null || cv == pv ||
                                    (pv != null && cv.equals(pv))) {
                                    oldVal = pv;
                                    if (value != null)
                                        p.val = value;
                                    else if (t.removeTreeNode(p))
                                        // Tree became too small: revert to list
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                    }
                }
                if (validated) {
                    if (oldVal != null) {
                        if (value == null)
                            addCount(-1L, -1); // deletion: decrement count
                        return oldVal;
                    }
                    break;
                }
                // else: head changed under us; retry the loop
            }
        }
        return null;
    }

    /**
     * Removes all of the mappings from this map.
     */
    public void clear() {
        long delta = 0L; // negative number of deletions
        int i = 0;
        Node<K,V>[] tab = table;
        while (tab != null && i < tab.length) {
            int fh;
            Node<K,V> f = tabAt(tab, i);
            if (f == null)
                ++i;
            else if ((fh = f.hash) == MOVED) {
                // Table is being resized; help finish, then rescan
                tab = helpTransfer(tab, f);
                i = 0; // restart
            }
            else {
                synchronized (f) { // lock the bin head before clearing it
                    if (tabAt(tab, i) == f) {
                        // Walk the bin (list or tree) to count removed nodes
                        Node<K,V> p = (fh >= 0 ? f :
                                       (f instanceof TreeBin) ?
                                       ((TreeBin<K,V>)f).first : null);
                        while (p != null) {
                            --delta;
                            p = p.next;
                        }
                        setTabAt(tab, i++, null);
                    }
                }
            }
        }
        if (delta != 0L)
            addCount(delta, -1); // -1: never trigger a resize check here
    }

1193     /**
1194     * Returns a {@link Set} view of the keys contained in this map.
1195     * The set is backed by the map, so changes to the map are
1196     * reflected in the set, and vice-versa. The set supports element
1197     * removal, which removes the corresponding mapping from this map,
1198     * via the {@code Iterator.remove}, {@code Set.remove},
1199     * {@code removeAll}, {@code retainAll}, and {@code clear}
1200     * operations. It does not support the {@code add} or
1201     * {@code addAll} operations.
1202     *
1203     * <p>The view's {@code iterator} is a "weakly consistent" iterator
1204     * that will never throw {@link ConcurrentModificationException},
1205     * and guarantees to traverse elements as they existed upon
1206     * construction of the iterator, and may (but is not guaranteed to)
1207     * reflect any modifications subsequent to construction.
1208     *
1209     * @return the set view
1210     */
1211     public KeySetView<K,V> keySet() {
1212     KeySetView<K,V> ks;
1213     return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
1214     }
1215    
1216     /**
1217     * Returns a {@link Collection} view of the values contained in this map.
1218     * The collection is backed by the map, so changes to the map are
1219     * reflected in the collection, and vice-versa. The collection
1220     * supports element removal, which removes the corresponding
1221     * mapping from this map, via the {@code Iterator.remove},
1222     * {@code Collection.remove}, {@code removeAll},
1223     * {@code retainAll}, and {@code clear} operations. It does not
1224     * support the {@code add} or {@code addAll} operations.
1225     *
1226     * <p>The view's {@code iterator} is a "weakly consistent" iterator
1227     * that will never throw {@link ConcurrentModificationException},
1228     * and guarantees to traverse elements as they existed upon
1229     * construction of the iterator, and may (but is not guaranteed to)
1230     * reflect any modifications subsequent to construction.
1231     *
1232     * @return the collection view
1233     */
1234     public Collection<V> values() {
1235     ValuesView<K,V> vs;
1236     return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
1237     }
1238    
1239     /**
1240     * Returns a {@link Set} view of the mappings contained in this map.
1241     * The set is backed by the map, so changes to the map are
1242     * reflected in the set, and vice-versa. The set supports element
1243     * removal, which removes the corresponding mapping from the map,
1244     * via the {@code Iterator.remove}, {@code Set.remove},
1245     * {@code removeAll}, {@code retainAll}, and {@code clear}
1246     * operations.
1247     *
1248     * <p>The view's {@code iterator} is a "weakly consistent" iterator
1249     * that will never throw {@link ConcurrentModificationException},
1250     * and guarantees to traverse elements as they existed upon
1251     * construction of the iterator, and may (but is not guaranteed to)
1252     * reflect any modifications subsequent to construction.
1253     *
1254     * @return the set view
1255     */
1256     public Set<Map.Entry<K,V>> entrySet() {
1257     EntrySetView<K,V> es;
1258     return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
1259 dl 1.27 }
1260    
1261 dl 1.102 /**
1262     * Returns the hash code value for this {@link Map}, i.e.,
1263     * the sum of, for each key-value pair in the map,
1264     * {@code key.hashCode() ^ value.hashCode()}.
1265     *
1266     * @return the hash code value for this map
1267 dl 1.27 */
1268 dl 1.102 public int hashCode() {
1269     int h = 0;
1270     Node<K,V>[] t;
1271     if ((t = table) != null) {
1272     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1273     for (Node<K,V> p; (p = it.advance()) != null; )
1274     h += p.key.hashCode() ^ p.val.hashCode();
1275     }
1276     return h;
1277     }
1278 dl 1.27
1279 dl 1.102 /**
1280     * Returns a string representation of this map. The string
1281     * representation consists of a list of key-value mappings (in no
1282     * particular order) enclosed in braces ("{@code {}}"). Adjacent
1283     * mappings are separated by the characters {@code ", "} (comma
1284     * and space). Each key-value mapping is rendered as the key
1285     * followed by an equals sign ("{@code =}") followed by the
1286     * associated value.
1287     *
1288     * @return a string representation of this map
1289     */
1290     public String toString() {
1291     Node<K,V>[] t;
1292     int f = (t = table) == null ? 0 : t.length;
1293     Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1294     StringBuilder sb = new StringBuilder();
1295     sb.append('{');
1296     Node<K,V> p;
1297     if ((p = it.advance()) != null) {
1298     for (;;) {
1299     K k = p.key;
1300     V v = p.val;
1301     sb.append(k == this ? "(this Map)" : k);
1302     sb.append('=');
1303     sb.append(v == this ? "(this Map)" : v);
1304     if ((p = it.advance()) == null)
1305     break;
1306     sb.append(',').append(' ');
1307 dl 1.14 }
1308 dl 1.102 }
1309     return sb.append('}').toString();
1310     }
1311    
    /**
     * Compares the specified object with this map for equality.
     * Returns {@code true} if the given object is a map with the same
     * mappings as this map. This operation may return misleading
     * results if either map is concurrently modified during execution
     * of this method.
     *
     * @param o object to be compared for equality with this map
     * @return {@code true} if the specified object is equal to this map
     */
    public boolean equals(Object o) {
        if (o != this) {
            if (!(o instanceof Map))
                return false;
            Map<?,?> m = (Map<?,?>) o;
            Node<K,V>[] t;
            int f = (t = table) == null ? 0 : t.length;
            Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
            // Direction 1: every mapping in this map must be present in m
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V val = p.val;
                Object v = m.get(p.key);
                if (v == null || (v != val && !v.equals(val)))
                    return false;
            }
            // Direction 2: every mapping in m must be present here
            for (Map.Entry<?,?> e : m.entrySet()) {
                Object mk, mv, v;
                if ((mk = e.getKey()) == null ||
                    (mv = e.getValue()) == null ||
                    (v = get(mk)) == null ||
                    (mv != v && !mv.equals(v)))
                    return false;
            }
        }
        return true;
    }

    /**
     * Stripped-down version of helper class used in previous version,
     * declared for the sake of serialization compatibility.
     */
    static class Segment<K,V> extends ReentrantLock implements Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        final float loadFactor; // retained only for the serialized form
        Segment(float lf) { this.loadFactor = lf; }
    }

    /**
     * Saves the state of the {@code ConcurrentHashMapV8} instance to a
     * stream (i.e., serializes it).
     * @param s the stream
     * @serialData
     * the key (Object) and value (Object)
     * for each key-value mapping, followed by a null pair.
     * The key-value mappings are emitted in no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        // For serialization compatibility
        // Emulate segment calculation from previous version of this class
        int sshift = 0;
        int ssize = 1;
        while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
            ++sshift;
            ssize <<= 1;
        }
        int segmentShift = 32 - sshift;
        int segmentMask = ssize - 1;
        @SuppressWarnings("unchecked") Segment<K,V>[] segments = (Segment<K,V>[])
            new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
        for (int i = 0; i < segments.length; ++i)
            segments[i] = new Segment<K,V>(LOAD_FACTOR);
        // Write the legacy pseudo-fields declared in serialPersistentFields
        s.putFields().put("segments", segments);
        s.putFields().put("segmentShift", segmentShift);
        s.putFields().put("segmentMask", segmentMask);
        s.writeFields();

        Node<K,V>[] t;
        if ((t = table) != null) {
            // Emit all key/value pairs via a weakly-consistent traversal
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                s.writeObject(p.key);
                s.writeObject(p.val);
            }
        }
        // Null pair terminates the stream of mappings (see readObject)
        s.writeObject(null);
        s.writeObject(null);
        segments = null; // throw away
    }

    /**
     * Reconstitutes the instance from a stream (that is, deserializes it).
     * @param s the stream
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        /*
         * To improve performance in typical cases, we create nodes
         * while reading, then place in table once size is known.
         * However, we must also validate uniqueness and deal with
         * overpopulated bins while doing so, which requires
         * specialized versions of putVal mechanics.
         */
        sizeCtl = -1; // force exclusion for table construction
        s.defaultReadObject();
        long size = 0L;
        // Read all key/value pairs into a temporary stack of nodes
        // (linked through p.next) until the null terminator pair.
        Node<K,V> p = null;
        for (;;) {
            @SuppressWarnings("unchecked") K k = (K) s.readObject();
            @SuppressWarnings("unchecked") V v = (V) s.readObject();
            if (k != null && v != null) {
                p = new Node<K,V>(spread(k.hashCode()), k, v, p);
                ++size;
            }
            else
                break;
        }
        if (size == 0L)
            sizeCtl = 0;
        else {
            // Size the table for the observed element count, capped at max.
            int n;
            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
                n = MAXIMUM_CAPACITY;
            else {
                int sz = (int)size;
                n = tableSizeFor(sz + (sz >>> 1) + 1);
            }
            @SuppressWarnings({"rawtypes","unchecked"})
            Node<K,V>[] tab = (Node<K,V>[])new Node[n];
            int mask = n - 1;
            long added = 0L; // count of unique mappings actually installed
            while (p != null) {
                boolean insertAtFront;
                Node<K,V> next = p.next, first;
                int h = p.hash, j = h & mask;
                if ((first = tabAt(tab, j)) == null)
                    insertAtFront = true; // empty bin: p becomes the head
                else {
                    K k = p.key;
                    if (first.hash < 0) {
                        // Bin already treeified; insert into the tree.
                        // putTreeVal returns null only for new keys.
                        TreeBin<K,V> t = (TreeBin<K,V>)first;
                        if (t.putTreeVal(h, k, p.val) == null)
                            ++added;
                        insertAtFront = false;
                    }
                    else {
                        // Scan the chain for a duplicate key, counting
                        // existing nodes to detect overpopulated bins.
                        int binCount = 0;
                        insertAtFront = true;
                        Node<K,V> q; K qk;
                        for (q = first; q != null; q = q.next) {
                            if (q.hash == h &&
                                ((qk = q.key) == k ||
                                 (qk != null && k.equals(qk)))) {
                                insertAtFront = false; // duplicate: drop p
                                break;
                            }
                            ++binCount;
                        }
                        if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
                            // Bin too long: link p in and convert the
                            // whole chain to a TreeBin instead.
                            insertAtFront = false;
                            ++added;
                            p.next = first;
                            TreeNode<K,V> hd = null, tl = null;
                            for (q = p; q != null; q = q.next) {
                                TreeNode<K,V> t = new TreeNode<K,V>
                                    (q.hash, q.key, q.val, null, null);
                                if ((t.prev = tl) == null)
                                    hd = t;
                                else
                                    tl.next = t;
                                tl = t;
                            }
                            setTabAt(tab, j, new TreeBin<K,V>(hd));
                        }
                    }
                }
                if (insertAtFront) {
                    ++added;
                    p.next = first;
                    setTabAt(tab, j, p);
                }
                p = next;
            }
            table = tab;
            sizeCtl = n - (n >>> 2); // set resize threshold to 0.75 * n
            baseCount = added;
        }
    }
1499    
1500     // ConcurrentMap methods
1501    
1502     /**
1503     * {@inheritDoc}
1504     *
1505     * @return the previous value associated with the specified key,
1506     * or {@code null} if there was no mapping for the key
1507     * @throws NullPointerException if the specified key or value is null
1508     */
1509     public V putIfAbsent(K key, V value) {
1510     return putVal(key, value, true);
1511     }
1512    
1513     /**
1514     * {@inheritDoc}
1515     *
1516     * @throws NullPointerException if the specified key is null
1517     */
1518     public boolean remove(Object key, Object value) {
1519     if (key == null)
1520     throw new NullPointerException();
1521     return value != null && replaceNode(key, null, value) != null;
1522     }
1523    
1524     /**
1525     * {@inheritDoc}
1526     *
1527     * @throws NullPointerException if any of the arguments are null
1528     */
1529     public boolean replace(K key, V oldValue, V newValue) {
1530     if (key == null || oldValue == null || newValue == null)
1531     throw new NullPointerException();
1532     return replaceNode(key, newValue, oldValue) != null;
1533     }
1534    
1535     /**
1536     * {@inheritDoc}
1537     *
1538     * @return the previous value associated with the specified key,
1539     * or {@code null} if there was no mapping for the key
1540     * @throws NullPointerException if the specified key or value is null
1541     */
1542     public V replace(K key, V value) {
1543     if (key == null || value == null)
1544     throw new NullPointerException();
1545     return replaceNode(key, value, null);
1546     }
1547    
1548     // Overrides of JDK8+ Map extension method defaults
1549    
1550     /**
1551     * Returns the value to which the specified key is mapped, or the
1552     * given default value if this map contains no mapping for the
1553     * key.
1554     *
1555     * @param key the key whose associated value is to be returned
1556     * @param defaultValue the value to return if this map contains
1557     * no mapping for the given key
1558     * @return the mapping for the key, if present; else the default value
1559     * @throws NullPointerException if the specified key is null
1560     */
1561     public V getOrDefault(Object key, V defaultValue) {
1562     V v;
1563     return (v = get(key)) == null ? defaultValue : v;
1564     }
1565    
1566     public void forEach(BiAction<? super K, ? super V> action) {
1567     if (action == null) throw new NullPointerException();
1568     Node<K,V>[] t;
1569     if ((t = table) != null) {
1570     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1571     for (Node<K,V> p; (p = it.advance()) != null; ) {
1572     action.apply(p.key, p.val);
1573     }
1574     }
1575     }
1576    
    /**
     * Replaces each entry's value with the result of invoking the given
     * function on that entry's key and current value. The function is
     * retried per key if a concurrent update intervenes between reading
     * the value and attempting the replacement.
     *
     * @param function the function to compute a replacement value
     * @throws NullPointerException if the function or any computed
     *         replacement value is null
     */
    public void replaceAll(BiFun<? super K, ? super V, ? extends V> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V oldValue = p.val;
                for (K key = p.key;;) {
                    V newValue = function.apply(key, oldValue);
                    if (newValue == null)
                        throw new NullPointerException();
                    // CAS-style retry: succeed if the value is still
                    // oldValue; otherwise reread and retry unless the
                    // mapping disappeared entirely.
                    if (replaceNode(key, newValue, oldValue) != null ||
                        (oldValue = get(key)) == null)
                        break;
                }
            }
        }
    }
1595    
1596 dl 1.102 /**
1597     * If the specified key is not already associated with a value,
1598     * attempts to compute its value using the given mapping function
1599     * and enters it into this map unless {@code null}. The entire
1600     * method invocation is performed atomically, so the function is
1601     * applied at most once per key. Some attempted update operations
1602     * on this map by other threads may be blocked while computation
1603     * is in progress, so the computation should be short and simple,
1604     * and must not attempt to update any other mappings of this map.
1605     *
1606     * @param key key with which the specified value is to be associated
1607     * @param mappingFunction the function to compute a value
1608     * @return the current (existing or computed) value associated with
1609     * the specified key, or null if the computed value is null
1610     * @throws NullPointerException if the specified key or mappingFunction
1611     * is null
1612     * @throws IllegalStateException if the computation detectably
1613     * attempts a recursive update to this map that would
1614     * otherwise never complete
1615     * @throws RuntimeException or Error if the mappingFunction does so,
1616     * in which case the mapping is left unestablished
1617     */
1618     public V computeIfAbsent(K key, Fun<? super K, ? extends V> mappingFunction) {
1619     if (key == null || mappingFunction == null)
1620 dl 1.82 throw new NullPointerException();
1621 dl 1.102 int h = spread(key.hashCode());
1622 dl 1.84 V val = null;
1623 dl 1.102 int binCount = 0;
1624     for (Node<K,V>[] tab = table;;) {
1625     Node<K,V> f; int n, i, fh;
1626     if (tab == null || (n = tab.length) == 0)
1627 dl 1.24 tab = initTable();
1628 dl 1.102 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1629     Node<K,V> r = new ReservationNode<K,V>();
1630     synchronized (r) {
1631     if (casTabAt(tab, i, null, r)) {
1632     binCount = 1;
1633     Node<K,V> node = null;
1634 dl 1.82 try {
1635 dl 1.102 if ((val = mappingFunction.apply(key)) != null)
1636     node = new Node<K,V>(h, key, val, null);
1637 dl 1.82 } finally {
1638 dl 1.102 setTabAt(tab, i, node);
1639 dl 1.1 }
1640     }
1641     }
1642 dl 1.102 if (binCount != 0)
1643 dl 1.10 break;
1644 dl 1.1 }
1645 dl 1.102 else if ((fh = f.hash) == MOVED)
1646     tab = helpTransfer(tab, f);
1647     else {
1648     boolean added = false;
1649     synchronized (f) {
1650     if (tabAt(tab, i) == f) {
1651     if (fh >= 0) {
1652     binCount = 1;
1653     for (Node<K,V> e = f;; ++binCount) {
1654     K ek; V ev;
1655     if (e.hash == h &&
1656     ((ek = e.key) == key ||
1657     (ek != null && key.equals(ek)))) {
1658     val = e.val;
1659     break;
1660     }
1661     Node<K,V> pred = e;
1662     if ((e = e.next) == null) {
1663     if ((val = mappingFunction.apply(key)) != null) {
1664     added = true;
1665     pred.next = new Node<K,V>(h, key, val, null);
1666     }
1667     break;
1668 dl 1.38 }
1669     }
1670 dl 1.102 }
1671     else if (f instanceof TreeBin) {
1672     binCount = 2;
1673     TreeBin<K,V> t = (TreeBin<K,V>)f;
1674     TreeNode<K,V> r, p;
1675     if ((r = t.root) != null &&
1676     (p = r.findTreeNode(h, key, null)) != null)
1677     val = p.val;
1678     else if ((val = mappingFunction.apply(key)) != null) {
1679     added = true;
1680     t.putTreeVal(h, key, val);
1681 dl 1.41 }
1682 dl 1.38 }
1683     }
1684     }
1685 dl 1.102 if (binCount != 0) {
1686     if (binCount >= TREEIFY_THRESHOLD)
1687     treeifyBin(tab, i);
1688     if (!added)
1689     return val;
1690     break;
1691     }
1692 dl 1.38 }
1693 dl 1.102 }
1694     if (val != null)
1695     addCount(1L, binCount);
1696     return val;
1697     }
1698    
    /**
     * If the value for the specified key is present, attempts to
     * compute a new mapping given the key and its current mapped
     * value. The entire method invocation is performed atomically.
     * Some attempted update operations on this map by other threads
     * may be blocked while computation is in progress, so the
     * computation should be short and simple, and must not attempt to
     * update any other mappings of this map.
     *
     * @param key key with which a value may be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     * is null
     * @throws IllegalStateException if the computation detectably
     * attempts a recursive update to this map that would
     * otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     * in which case the mapping is unchanged
     */
    public V computeIfPresent(K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;    // net change in mapping count (-1, 0)
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
                break; // key absent: nothing to compute
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f); // resize in progress; help, retry
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) { // recheck head under lock
                        if (fh >= 0) {
                            // Linked-list bin: find the key; replace its
                            // value, or unlink the node if the function
                            // returned null.
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(key, e.val);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(h, key, null)) != null) {
                                val = remappingFunction.apply(key, p.val);
                                if (val != null)
                                    p.val = val;
                                else {
                                    delta = -1;
                                    // If removal shrank the tree enough,
                                    // convert back to a linked list.
                                    if (t.removeTreeNode(p))
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
1788    
    /**
     * Attempts to compute a mapping for the specified key and its
     * current mapped value (or {@code null} if there is no current
     * mapping). The entire method invocation is performed atomically.
     * Some attempted update operations on this map by other threads
     * may be blocked while computation is in progress, so the
     * computation should be short and simple, and must not attempt to
     * update any other mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     * is null
     * @throws IllegalStateException if the computation detectably
     * attempts a recursive update to this map that would
     * otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     * in which case the mapping is unchanged
     */
    public V compute(K key,
                     BiFun<? super K, ? super V, ? extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;    // net change in mapping count (-1, 0, or 1)
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: reserve it with a placeholder so other
                // writers block while the remapping function runs.
                Node<K,V> r = new ReservationNode<K,V>();
                synchronized (r) {
                    if (casTabAt(tab, i, null, r)) {
                        binCount = 1;
                        Node<K,V> node = null;
                        try {
                            if ((val = remappingFunction.apply(key, null)) != null) {
                                delta = 1;
                                node = new Node<K,V>(h, key, val, null);
                            }
                        } finally {
                            // Always replace the reservation, with the new
                            // node or back to null.
                            setTabAt(tab, i, node);
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f); // resize in progress; help, retry
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) { // recheck head under lock
                        if (fh >= 0) {
                            // Linked-list bin: remap an existing node,
                            // unlink it on null result, or append a new
                            // node when the key is absent.
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(key, e.val);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    val = remappingFunction.apply(key, null);
                                    if (val != null) {
                                        delta = 1;
                                        pred.next =
                                            new Node<K,V>(h, key, val, null);
                                    }
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 1;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null)
                                p = r.findTreeNode(h, key, null);
                            else
                                p = null;
                            V pv = (p == null) ? null : p.val;
                            val = remappingFunction.apply(key, pv);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                // Shrunken tree may revert to a list.
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
1914    
    /**
     * If the specified key is not already associated with a
     * (non-null) value, associates it with the given value.
     * Otherwise, replaces the value with the results of the given
     * remapping function, or removes if {@code null}. The entire
     * method invocation is performed atomically. Some attempted
     * update operations on this map by other threads may be blocked
     * while computation is in progress, so the computation should be
     * short and simple, and must not attempt to update any other
     * mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param value the value to use if absent
     * @param remappingFunction the function to recompute a value if present
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or the
     * remappingFunction is null
     * @throws RuntimeException or Error if the remappingFunction does so,
     * in which case the mapping is unchanged
     */
    public V merge(K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
        if (key == null || value == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;    // net change in mapping count (-1, 0, or 1)
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: the function is not invoked; install the
                // given value directly via CAS (no lock needed).
                if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) {
                    delta = 1;
                    val = value;
                    break;
                }
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f); // resize in progress; help, retry
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) { // recheck head under lock
                        if (fh >= 0) {
                            // Linked-list bin: remap an existing node,
                            // unlink it on null result, or append the
                            // given value when the key is absent.
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(e.val, value);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    delta = 1;
                                    val = value;
                                    pred.next =
                                        new Node<K,V>(h, key, val, null);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r = t.root;
                            TreeNode<K,V> p = (r == null) ? null :
                                r.findTreeNode(h, key, null);
                            val = (p == null) ? value :
                                remappingFunction.apply(p.val, value);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                // Shrunken tree may revert to a list.
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
2023    
2024 dl 1.102 // Hashtable legacy methods
2025    
2026 dl 1.82 /**
2027 dl 1.102 * Legacy method testing if some key maps into the specified value
2028     * in this table. This method is identical in functionality to
2029     * {@link #containsValue(Object)}, and exists solely to ensure
2030     * full compatibility with class {@link java.util.Hashtable},
2031     * which supported this method prior to introduction of the
2032     * Java Collections framework.
2033     *
2034     * @param value a value to search for
2035     * @return {@code true} if and only if some key maps to the
2036     * {@code value} argument in this table as
2037     * determined by the {@code equals} method;
2038     * {@code false} otherwise
2039     * @throws NullPointerException if the specified value is null
2040     */
2041     @Deprecated public boolean contains(Object value) {
2042     return containsValue(value);
2043     }
2044    
2045     /**
2046     * Returns an enumeration of the keys in this table.
2047     *
2048     * @return an enumeration of the keys in this table
2049     * @see #keySet()
2050     */
2051     public Enumeration<K> keys() {
2052     Node<K,V>[] t;
2053     int f = (t = table) == null ? 0 : t.length;
2054     return new KeyIterator<K,V>(t, f, 0, f, this);
2055     }
2056    
2057     /**
2058     * Returns an enumeration of the values in this table.
2059     *
2060     * @return an enumeration of the values in this table
2061     * @see #values()
2062     */
2063     public Enumeration<V> elements() {
2064     Node<K,V>[] t;
2065     int f = (t = table) == null ? 0 : t.length;
2066     return new ValueIterator<K,V>(t, f, 0, f, this);
2067     }
2068    
2069     // ConcurrentHashMapV8-only methods
2070    
2071     /**
2072     * Returns the number of mappings. This method should be used
2073     * instead of {@link #size} because a ConcurrentHashMapV8 may
2074     * contain more mappings than can be represented as an int. The
2075     * value returned is an estimate; the actual count may differ if
2076     * there are concurrent insertions or removals.
2077     *
2078     * @return the number of mappings
2079     * @since 1.8
2080     */
2081     public long mappingCount() {
2082     long n = sumCount();
2083     return (n < 0L) ? 0L : n; // ignore transient negative values
2084     }
2085    
2086     /**
2087     * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2088     * from the given type to {@code Boolean.TRUE}.
2089     *
2090     * @return the new set
2091     * @since 1.8
2092     */
2093     public static <K> KeySetView<K,Boolean> newKeySet() {
2094     return new KeySetView<K,Boolean>
2095     (new ConcurrentHashMapV8<K,Boolean>(), Boolean.TRUE);
2096     }
2097    
2098     /**
2099     * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2100     * from the given type to {@code Boolean.TRUE}.
2101     *
2102     * @param initialCapacity The implementation performs internal
2103     * sizing to accommodate this many elements.
2104     * @throws IllegalArgumentException if the initial capacity of
2105     * elements is negative
2106     * @return the new set
2107     * @since 1.8
2108     */
2109     public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2110     return new KeySetView<K,Boolean>
2111     (new ConcurrentHashMapV8<K,Boolean>(initialCapacity), Boolean.TRUE);
2112     }
2113    
2114     /**
2115     * Returns a {@link Set} view of the keys in this map, using the
2116     * given common mapped value for any additions (i.e., {@link
2117     * Collection#add} and {@link Collection#addAll(Collection)}).
2118     * This is of course only appropriate if it is acceptable to use
2119     * the same value for all additions from this view.
2120     *
2121     * @param mappedValue the mapped value to use for any additions
2122     * @return the set view
2123     * @throws NullPointerException if the mappedValue is null
2124 dl 1.82 */
2125 dl 1.102 public KeySetView<K,V> keySet(V mappedValue) {
2126     if (mappedValue == null)
2127     throw new NullPointerException();
2128     return new KeySetView<K,V>(this, mappedValue);
2129     }
2130    
2131     /* ---------------- Special Nodes -------------- */
2132    
    /**
     * A node inserted at head of bins during transfer operations.
     * Its hash is MOVED, signalling readers to continue their search
     * in {@code nextTable}.
     */
    static final class ForwardingNode<K,V> extends Node<K,V> {
        final Node<K,V>[] nextTable;
        ForwardingNode(Node<K,V>[] tab) {
            super(MOVED, null, null, null);
            this.nextTable = tab;
        }

        Node<K,V> find(int h, Object k) {
            // loop to avoid arbitrarily deep recursion on forwarding nodes
            outer: for (Node<K,V>[] tab = nextTable;;) {
                Node<K,V> e; int n;
                if (k == null || tab == null || (n = tab.length) == 0 ||
                    (e = tabAt(tab, (n - 1) & h)) == null)
                    return null;
                for (;;) {
                    int eh; K ek;
                    if ((eh = e.hash) == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                    if (eh < 0) {
                        // Negative hash: either another forwarding node
                        // (restart the outer loop in its table, instead
                        // of recursing) or a special node such as a
                        // TreeBin (delegate to its own find).
                        if (e instanceof ForwardingNode) {
                            tab = ((ForwardingNode<K,V>)e).nextTable;
                            continue outer;
                        }
                        else
                            return e.find(h, k);
                    }
                    if ((e = e.next) == null)
                        return null;
                }
            }
        }
    }
2169    
2170 dl 1.24 /**
2171 dl 1.102 * A place-holder node used in computeIfAbsent and compute
2172 dl 1.24 */
2173 dl 1.102 static final class ReservationNode<K,V> extends Node<K,V> {
2174     ReservationNode() {
2175     super(RESERVED, null, null, null);
2176     }
2177    
2178     Node<K,V> find(int h, Object k) {
2179     return null;
2180     }
2181 dl 1.24 }
2182    
2183 dl 1.102 /* ---------------- Table Initialization and Resizing -------------- */
2184    
    /**
     * Initializes table, using the size recorded in sizeCtl.
     * Only one thread performs the initialization (claimed by CAS-ing
     * sizeCtl to -1); losers spin via Thread.yield until the table
     * becomes visible.
     */
    private final Node<K,V>[] initTable() {
        Node<K,V>[] tab; int sc;
        while ((tab = table) == null || tab.length == 0) {
            if ((sc = sizeCtl) < 0)
                Thread.yield(); // lost initialization race; just spin
            else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                try {
                    // Recheck: another thread may have initialized
                    // between our read and the CAS.
                    if ((tab = table) == null || tab.length == 0) {
                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                        @SuppressWarnings({"rawtypes","unchecked"})
                        Node<K,V>[] nt = (Node<K,V>[])new Node[n];
                        table = tab = nt;
                        sc = n - (n >>> 2); // next resize threshold: 0.75 * n
                    }
                } finally {
                    sizeCtl = sc; // publish threshold (or restore on failure)
                }
                break;
            }
        }
        return tab;
    }
2210    
    /**
     * Adds to count, and if table is too small and not already
     * resizing, initiates transfer. If already resizing, helps
     * perform transfer if work is available. Rechecks occupancy
     * after a transfer to see if another resize is already needed
     * because resizings are lagging additions.
     *
     * @param x the count to add
     * @param check if negative, don't check resize; if no greater
     *        than 1, only check if uncontended
     */
    private final void addCount(long x, int check) {
        CounterCell[] as; long b, s;
        // Fast path: CAS the base count; fall back to per-thread
        // striped counter cells when cells exist or the CAS fails.
        if ((as = counterCells) != null ||
            !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
            CounterHashCode hc; CounterCell a; long v; int m;
            boolean uncontended = true;
            // Try a single CAS on this thread's cell; on any miss
            // (no hash, no cells, empty slot, or CAS contention)
            // defer to the slow path which may expand the cell table.
            if ((hc = threadCounterHashCode.get()) == null ||
                as == null || (m = as.length - 1) < 0 ||
                (a = as[m & hc.code]) == null ||
                !(uncontended =
                  U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
                fullAddCount(x, hc, uncontended);
                return;
            }
            if (check <= 1)
                return;
            s = sumCount();
        }
        if (check >= 0) {
            Node<K,V>[] tab, nt; int sc;
            // Trigger or join a resize while occupancy exceeds threshold.
            while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
                   tab.length < MAXIMUM_CAPACITY) {
                if (sc < 0) {
                    // Resize already running: join it unless it is
                    // initializing (-1), out of work, or finished.
                    if (sc == -1 || transferIndex <= transferOrigin ||
                        (nt = nextTable) == null)
                        break;
                    if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
                        transfer(tab, nt);
                }
                else if (U.compareAndSwapInt(this, SIZECTL, sc, -2))
                    transfer(tab, null); // become the initiating thread
                s = sumCount();
            }
        }
    }
2256    
    /**
     * Helps transfer if a resize is in progress.
     *
     * @param tab the table a writer was operating on
     * @param f the head node encountered there (possibly a ForwardingNode)
     * @return the table to retry the operation on (the next table if
     *         forwarding, else the current table)
     */
    final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
        Node<K,V>[] nextTab; int sc;
        if ((f instanceof ForwardingNode) &&
            (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
            // Join the transfer only if it is still the current one and
            // there is unclaimed work; sizeCtl decrement registers this
            // thread as an additional helper.
            if (nextTab == nextTable && tab == table &&
                transferIndex > transferOrigin && (sc = sizeCtl) < -1 &&
                U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
                transfer(tab, nextTab);
            return nextTab;
        }
        return table;
    }
2272    
    /**
     * Tries to presize table to accommodate the given number of elements.
     *
     * @param size number of elements (doesn't need to be perfectly accurate)
     */
    private final void tryPresize(int size) {
        // Target capacity: 1.5 * size rounded up to a power of two,
        // capped at MAXIMUM_CAPACITY.
        int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
            tableSizeFor(size + (size >>> 1) + 1);
        int sc;
        while ((sc = sizeCtl) >= 0) { // stop if another thread is resizing
            Node<K,V>[] tab = table; int n;
            if (tab == null || (n = tab.length) == 0) {
                // Not yet initialized: create directly at the larger of
                // the recorded initial size and the target capacity.
                n = (sc > c) ? sc : c;
                if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                    try {
                        if (table == tab) {
                            @SuppressWarnings({"rawtypes","unchecked"})
                            Node<K,V>[] nt = (Node<K,V>[])new Node[n];
                            table = nt;
                            sc = n - (n >>> 2); // threshold: 0.75 * n
                        }
                    } finally {
                        sizeCtl = sc;
                    }
                }
            }
            else if (c <= sc || n >= MAXIMUM_CAPACITY)
                break; // already large enough, or cannot grow further
            else if (tab == table &&
                     U.compareAndSwapInt(this, SIZECTL, sc, -2))
                transfer(tab, null); // initiate a resize
        }
    }
2306    
    /**
     * Moves and/or copies the nodes in each bin to new table. See
     * above for explanation.
     *
     * Invoked by the initiator with nextTab == null (which allocates
     * the doubled table), or by helpers joining an in-progress resize
     * with the existing nextTab. Each participant repeatedly claims a
     * "stride" of bins from the high end of the old table via CAS on
     * transferIndex and moves them; moved bins are replaced by a
     * ForwardingNode so concurrent readers follow into the new table.
     */
    private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
        int n = tab.length, stride;
        // Bins claimed per CAS; floored at MIN_TRANSFER_STRIDE to keep
        // per-claim bookkeeping overhead bounded.
        if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
            stride = MIN_TRANSFER_STRIDE; // subdivide range
        if (nextTab == null) {            // initiating
            try {
                @SuppressWarnings({"rawtypes","unchecked"})
                Node<K,V>[] nt = (Node<K,V>[])new Node[n << 1];
                nextTab = nt;
            } catch (Throwable ex) {      // try to cope with OOME
                sizeCtl = Integer.MAX_VALUE;   // disable further resizing
                return;
            }
            nextTable = nextTab;
            transferOrigin = n;
            transferIndex = n;
            ForwardingNode<K,V> rev = new ForwardingNode<K,V>(tab);
            // Pre-fill the new table, top stride down, with "reverse"
            // forwarders pointing back to the old table so lookups that
            // race ahead of the transfer are redirected safely; each
            // revealed range is published via ordered write of
            // transferOrigin.
            for (int k = n; k > 0;) {    // progressively reveal ready slots
                int nextk = (k > stride) ? k - stride : 0;
                for (int m = nextk; m < k; ++m)
                    nextTab[m] = rev;
                for (int m = n + nextk; m < n + k; ++m)
                    nextTab[m] = rev;
                U.putOrderedInt(this, TRANSFERORIGIN, k = nextk);
            }
        }
        int nextn = nextTab.length;
        ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
        boolean advance = true;
        boolean finishing = false; // to ensure sweep before committing nextTab
        for (int i = 0, bound = 0;;) {
            int nextIndex, nextBound, fh; Node<K,V> f;
            // Advance to the next bin: step down within the current
            // stride, or CAS transferIndex to claim a fresh stride.
            while (advance) {
                if (--i >= bound || finishing)
                    advance = false;
                else if ((nextIndex = transferIndex) <= transferOrigin) {
                    i = -1;   // no more strides available
                    advance = false;
                }
                else if (U.compareAndSwapInt
                         (this, TRANSFERINDEX, nextIndex,
                          nextBound = (nextIndex > stride ?
                                       nextIndex - stride : 0))) {
                    bound = nextBound;
                    i = nextIndex - 1;
                    advance = false;
                }
            }
            if (i < 0 || i >= n || i + n >= nextn) {
                if (finishing) {   // full sweep done: commit new table
                    nextTable = null;
                    table = nextTab;
                    sizeCtl = (n << 1) - (n >>> 1);   // threshold 0.75 * 2n
                    return;
                }
                // Deregister this participant. The last one out (sc was
                // -1) loops back for a full recheck sweep instead of
                // recursing (rev 1.110: avoids unbounded recursion).
                for (int sc;;) {
                    if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) {
                        if (sc != -1)
                            return;
                        finishing = advance = true;
                        i = n; // recheck before commit
                        break;
                    }
                }
            }
            else if ((f = tabAt(tab, i)) == null) {
                // Empty bin: claim it by installing the forwarder.
                if (casTabAt(tab, i, null, fwd)) {
                    setTabAt(nextTab, i, null);
                    setTabAt(nextTab, i + n, null);
                    advance = true;
                }
            }
            else if ((fh = f.hash) == MOVED)
                advance = true; // already processed
            else {
                synchronized (f) {   // lock bin head while splitting
                    if (tabAt(tab, i) == f) {   // recheck under lock
                        // Split bin into low (stays at i) and high
                        // (moves to i + n) per the new hash bit (& n).
                        Node<K,V> ln, hn;
                        if (fh >= 0) {   // ordinary list bin
                            // The trailing run whose nodes all land in
                            // the same half can be reused unchanged.
                            int runBit = fh & n;
                            Node<K,V> lastRun = f;
                            for (Node<K,V> p = f.next; p != null; p = p.next) {
                                int b = p.hash & n;
                                if (b != runBit) {
                                    runBit = b;
                                    lastRun = p;
                                }
                            }
                            if (runBit == 0) {
                                ln = lastRun;
                                hn = null;
                            }
                            else {
                                hn = lastRun;
                                ln = null;
                            }
                            // Copy the rest (reversed) onto the proper list.
                            for (Node<K,V> p = f; p != lastRun; p = p.next) {
                                int ph = p.hash; K pk = p.key; V pv = p.val;
                                if ((ph & n) == 0)
                                    ln = new Node<K,V>(ph, pk, pv, ln);
                                else
                                    hn = new Node<K,V>(ph, pk, pv, hn);
                            }
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);
                            advance = true;
                        }
                        else if (f instanceof TreeBin) {
                            // Tree bin: partition node list into lo/hi
                            // chains, then untreeify small halves,
                            // rebuild trees, or reuse the whole bin.
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> lo = null, loTail = null;
                            TreeNode<K,V> hi = null, hiTail = null;
                            int lc = 0, hc = 0;
                            for (Node<K,V> e = t.first; e != null; e = e.next) {
                                int h = e.hash;
                                TreeNode<K,V> p = new TreeNode<K,V>
                                    (h, e.key, e.val, null, null);
                                if ((h & n) == 0) {
                                    if ((p.prev = loTail) == null)
                                        lo = p;
                                    else
                                        loTail.next = p;
                                    loTail = p;
                                    ++lc;
                                }
                                else {
                                    if ((p.prev = hiTail) == null)
                                        hi = p;
                                    else
                                        hiTail.next = p;
                                    hiTail = p;
                                    ++hc;
                                }
                            }
                            ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                                (hc != 0) ? new TreeBin<K,V>(lo) : t;
                            hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                                (lc != 0) ? new TreeBin<K,V>(hi) : t;
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);
                            advance = true;
                        }
                    }
                }
            }
        }
    }
2459    
2460 dl 1.102 /* ---------------- Conversion from/to TreeBins -------------- */
2461 dl 1.82
    /**
     * Replaces all linked nodes in bin at given index unless table is
     * too small, in which case resizes instead.
     */
    private final void treeifyBin(Node<K,V>[] tab, int index) {
        Node<K,V> b; int n, sc;
        if (tab != null) {
            if ((n = tab.length) < MIN_TREEIFY_CAPACITY) {
                // Table too small to justify trees: grow instead, which
                // shortens bins by redistributing nodes.
                if (tab == table && (sc = sizeCtl) >= 0 &&
                    U.compareAndSwapInt(this, SIZECTL, sc, -2))
                    transfer(tab, null);
            }
            else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
                synchronized (b) {   // lock bin head, as in put
                    if (tabAt(tab, index) == b) {   // recheck under lock
                        // Copy list into doubly-linked TreeNodes, then
                        // install a TreeBin built from them.
                        TreeNode<K,V> hd = null, tl = null;
                        for (Node<K,V> e = b; e != null; e = e.next) {
                            TreeNode<K,V> p =
                                new TreeNode<K,V>(e.hash, e.key, e.val,
                                                  null, null);
                            if ((p.prev = tl) == null)
                                hd = p;
                            else
                                tl.next = p;
                            tl = p;
                        }
                        setTabAt(tab, index, new TreeBin<K,V>(hd));
                    }
                }
            }
        }
    }
2494    
2495 dl 1.102 /**
2496 jsr166 1.105 * Returns a list on non-TreeNodes replacing those in given list.
2497 dl 1.102 */
2498     static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2499     Node<K,V> hd = null, tl = null;
2500     for (Node<K,V> q = b; q != null; q = q.next) {
2501     Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
2502     if (tl == null)
2503     hd = p;
2504     else
2505     tl.next = p;
2506     tl = p;
2507     }
2508     return hd;
2509     }
2510    
2511     /* ---------------- TreeNodes -------------- */
2512 dl 1.14
    /**
     * Nodes for use in TreeBins.
     *
     * Extends Node with red-black tree links plus a prev pointer so a
     * node can be unlinked from the "next" chain upon deletion.
     */
    static final class TreeNode<K,V> extends Node<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;

        TreeNode(int hash, K key, V val, Node<K,V> next,
                 TreeNode<K,V> parent) {
            super(hash, key, val, next);
            this.parent = parent;
        }

        // Node.find override: delegate to tree search from this node.
        Node<K,V> find(int h, Object k) {
            return findTreeNode(h, k, null);
        }

        /**
         * Returns the TreeNode (or null if not found) for the given key
         * starting at given root.
         *
         * Descends by hash; on hash ties, tries Comparable ordering
         * (kc caches the comparable class of k), and when neither
         * discriminates, recurs into the right subtree before taking
         * the left.
         */
        final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
            if (k != null) {
                TreeNode<K,V> p = this;
                do {
                    int ph, dir; K pk; TreeNode<K,V> q;
                    TreeNode<K,V> pl = p.left, pr = p.right;
                    if ((ph = p.hash) > h)
                        p = pl;
                    else if (ph < h)
                        p = pr;
                    else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                        return p;   // exact match
                    else if (pl == null && pr == null)
                        break;      // leaf; nowhere else to look
                    else if ((kc != null ||
                              (kc = comparableClassFor(k)) != null) &&
                             (dir = compareComparables(kc, k, pk)) != 0)
                        p = (dir < 0) ? pl : pr;   // comparable tie-break
                    else if (pl == null)
                        p = pr;
                    else if (pr == null ||
                             (q = pr.findTreeNode(h, k, kc)) == null)
                        p = pl;     // not in right subtree: continue left
                    else
                        return q;
                } while (p != null);
            }
            return null;
        }
    }
2567    
2568     /* ---------------- TreeBins -------------- */
2569    
2570     /**
2571     * TreeNodes used at the heads of bins. TreeBins do not hold user
2572     * keys or values, but instead point to list of TreeNodes and
2573     * their root. They also maintain a parasitic read-write lock
2574     * forcing writers (who hold bin lock) to wait for readers (who do
2575     * not) to complete before tree restructuring operations.
2576     */
2577     static final class TreeBin<K,V> extends Node<K,V> {
2578     TreeNode<K,V> root;
2579     volatile TreeNode<K,V> first;
2580     volatile Thread waiter;
2581     volatile int lockState;
2582     // values for lockState
2583     static final int WRITER = 1; // set while holding write lock
2584     static final int WAITER = 2; // set when waiting for write lock
2585     static final int READER = 4; // increment value for setting read lock
2586    
2587     /**
2588     * Creates bin with initial set of nodes headed by b.
2589     */
2590     TreeBin(TreeNode<K,V> b) {
2591     super(TREEBIN, null, null, null);
2592     this.first = b;
2593     TreeNode<K,V> r = null;
2594     for (TreeNode<K,V> x = b, next; x != null; x = next) {
2595     next = (TreeNode<K,V>)x.next;
2596     x.left = x.right = null;
2597     if (r == null) {
2598     x.parent = null;
2599     x.red = false;
2600     r = x;
2601     }
2602     else {
2603     Object key = x.key;
2604     int hash = x.hash;
2605     Class<?> kc = null;
2606     for (TreeNode<K,V> p = r;;) {
2607     int dir, ph;
2608     if ((ph = p.hash) > hash)
2609     dir = -1;
2610     else if (ph < hash)
2611     dir = 1;
2612     else if ((kc != null ||
2613     (kc = comparableClassFor(key)) != null))
2614     dir = compareComparables(kc, key, p.key);
2615     else
2616     dir = 0;
2617     TreeNode<K,V> xp = p;
2618     if ((p = (dir <= 0) ? p.left : p.right) == null) {
2619     x.parent = xp;
2620     if (dir <= 0)
2621     xp.left = x;
2622     else
2623     xp.right = x;
2624     r = balanceInsertion(r, x);
2625     break;
2626     }
2627     }
2628     }
2629     }
2630     this.root = r;
2631     }
2632    
2633     /**
2634 jsr166 1.105 * Acquires write lock for tree restructuring.
2635 dl 1.102 */
2636     private final void lockRoot() {
2637     if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
2638     contendedLock(); // offload to separate method
2639     }
2640    
2641     /**
2642 jsr166 1.105 * Releases write lock for tree restructuring.
2643 dl 1.102 */
2644     private final void unlockRoot() {
2645     lockState = 0;
2646     }
2647 dl 1.14
2648 dl 1.102 /**
2649 jsr166 1.105 * Possibly blocks awaiting root lock.
2650 dl 1.102 */
2651     private final void contendedLock() {
2652     boolean waiting = false;
2653     for (int s;;) {
2654     if (((s = lockState) & WRITER) == 0) {
2655     if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
2656     if (waiting)
2657     waiter = null;
2658     return;
2659     }
2660     }
2661     else if ((s | WAITER) == 0) {
2662     if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
2663     waiting = true;
2664     waiter = Thread.currentThread();
2665     }
2666     }
2667     else if (waiting)
2668     LockSupport.park(this);
2669     }
2670 dl 1.14 }
2671    
2672 dl 1.102 /**
2673     * Returns matching node or null if none. Tries to search
2674 jsr166 1.108 * using tree comparisons from root, but continues linear
2675 dl 1.102 * search when lock not available.
2676     */
2677     final Node<K,V> find(int h, Object k) {
2678     if (k != null) {
2679     for (Node<K,V> e = first; e != null; e = e.next) {
2680     int s; K ek;
2681     if (((s = lockState) & (WAITER|WRITER)) != 0) {
2682     if (e.hash == h &&
2683     ((ek = e.key) == k || (ek != null && k.equals(ek))))
2684     return e;
2685     }
2686     else if (U.compareAndSwapInt(this, LOCKSTATE, s,
2687     s + READER)) {
2688     TreeNode<K,V> r, p;
2689     try {
2690     p = ((r = root) == null ? null :
2691     r.findTreeNode(h, k, null));
2692     } finally {
2693     Thread w;
2694     int ls;
2695     do {} while (!U.compareAndSwapInt
2696     (this, LOCKSTATE,
2697     ls = lockState, ls - READER));
2698     if (ls == (READER|WAITER) && (w = waiter) != null)
2699     LockSupport.unpark(w);
2700     }
2701     return p;
2702     }
2703     }
2704 dl 1.79 }
2705 dl 1.102 return null;
2706 dl 1.41 }
2707    
2708     /**
2709 dl 1.102 * Finds or adds a node.
2710     * @return null if added
2711 dl 1.41 */
2712 dl 1.102 final TreeNode<K,V> putTreeVal(int h, K k, V v) {
2713     Class<?> kc = null;
2714     for (TreeNode<K,V> p = root;;) {
2715     int dir, ph; K pk; TreeNode<K,V> q, pr;
2716     if (p == null) {
2717     first = root = new TreeNode<K,V>(h, k, v, null, null);
2718     break;
2719     }
2720     else if ((ph = p.hash) > h)
2721     dir = -1;
2722     else if (ph < h)
2723     dir = 1;
2724     else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2725     return p;
2726     else if ((kc == null &&
2727     (kc = comparableClassFor(k)) == null) ||
2728     (dir = compareComparables(kc, k, pk)) == 0) {
2729     if (p.left == null)
2730     dir = 1;
2731     else if ((pr = p.right) == null ||
2732     (q = pr.findTreeNode(h, k, kc)) == null)
2733     dir = -1;
2734     else
2735     return q;
2736     }
2737     TreeNode<K,V> xp = p;
2738     if ((p = (dir < 0) ? p.left : p.right) == null) {
2739     TreeNode<K,V> x, f = first;
2740     first = x = new TreeNode<K,V>(h, k, v, f, xp);
2741     if (f != null)
2742     f.prev = x;
2743     if (dir < 0)
2744     xp.left = x;
2745 dl 1.63 else
2746 dl 1.102 xp.right = x;
2747     if (!xp.red)
2748     x.red = true;
2749     else {
2750     lockRoot();
2751     try {
2752     root = balanceInsertion(root, x);
2753     } finally {
2754     unlockRoot();
2755 dl 1.38 }
2756 dl 1.102 }
2757     break;
2758 dl 1.1 }
2759 dl 1.102 }
2760     assert checkInvariants(root);
2761     return null;
2762 dl 1.1 }
2763 dl 1.41
2764 dl 1.102 /**
2765     * Removes the given node, that must be present before this
2766     * call. This is messier than typical red-black deletion code
2767     * because we cannot swap the contents of an interior node
2768     * with a leaf successor that is pinned by "next" pointers
2769     * that are accessible independently of lock. So instead we
2770     * swap the tree linkages.
2771     *
2772 jsr166 1.106 * @return true if now too small, so should be untreeified
2773 dl 1.102 */
2774     final boolean removeTreeNode(TreeNode<K,V> p) {
2775     TreeNode<K,V> next = (TreeNode<K,V>)p.next;
2776     TreeNode<K,V> pred = p.prev; // unlink traversal pointers
2777     TreeNode<K,V> r, rl;
2778     if (pred == null)
2779     first = next;
2780     else
2781     pred.next = next;
2782     if (next != null)
2783     next.prev = pred;
2784     if (first == null) {
2785     root = null;
2786     return true;
2787     }
2788     if ((r = root) == null || r.right == null || // too small
2789     (rl = r.left) == null || rl.left == null)
2790     return true;
2791     lockRoot();
2792     try {
2793     TreeNode<K,V> replacement;
2794     TreeNode<K,V> pl = p.left;
2795     TreeNode<K,V> pr = p.right;
2796     if (pl != null && pr != null) {
2797     TreeNode<K,V> s = pr, sl;
2798     while ((sl = s.left) != null) // find successor
2799     s = sl;
2800     boolean c = s.red; s.red = p.red; p.red = c; // swap colors
2801     TreeNode<K,V> sr = s.right;
2802     TreeNode<K,V> pp = p.parent;
2803     if (s == pr) { // p was s's direct parent
2804     p.parent = s;
2805     s.right = p;
2806     }
2807     else {
2808     TreeNode<K,V> sp = s.parent;
2809     if ((p.parent = sp) != null) {
2810     if (s == sp.left)
2811     sp.left = p;
2812     else
2813     sp.right = p;
2814     }
2815     if ((s.right = pr) != null)
2816     pr.parent = s;
2817     }
2818     p.left = null;
2819     if ((p.right = sr) != null)
2820     sr.parent = p;
2821     if ((s.left = pl) != null)
2822     pl.parent = s;
2823     if ((s.parent = pp) == null)
2824     r = s;
2825     else if (p == pp.left)
2826     pp.left = s;
2827     else
2828     pp.right = s;
2829     if (sr != null)
2830     replacement = sr;
2831     else
2832     replacement = p;
2833     }
2834     else if (pl != null)
2835     replacement = pl;
2836     else if (pr != null)
2837     replacement = pr;
2838     else
2839     replacement = p;
2840     if (replacement != p) {
2841     TreeNode<K,V> pp = replacement.parent = p.parent;
2842     if (pp == null)
2843     r = replacement;
2844     else if (p == pp.left)
2845     pp.left = replacement;
2846     else
2847     pp.right = replacement;
2848     p.left = p.right = p.parent = null;
2849     }
2850    
2851     root = (p.red) ? r : balanceDeletion(r, replacement);
2852    
2853     if (p == replacement) { // detach pointers
2854     TreeNode<K,V> pp;
2855     if ((pp = p.parent) != null) {
2856     if (p == pp.left)
2857     pp.left = null;
2858     else if (p == pp.right)
2859     pp.right = null;
2860     p.parent = null;
2861     }
2862     }
2863     } finally {
2864     unlockRoot();
2865     }
2866     assert checkInvariants(root);
2867     return false;
2868 dl 1.41 }
2869    
2870 dl 1.102 /* ------------------------------------------------------------ */
2871     // Red-black tree methods, all adapted from CLR
2872    
2873     static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
2874     TreeNode<K,V> p) {
2875     TreeNode<K,V> r, pp, rl;
2876     if (p != null && (r = p.right) != null) {
2877     if ((rl = p.right = r.left) != null)
2878     rl.parent = p;
2879     if ((pp = r.parent = p.parent) == null)
2880     (root = r).red = false;
2881     else if (pp.left == p)
2882     pp.left = r;
2883     else
2884     pp.right = r;
2885     r.left = p;
2886     p.parent = r;
2887     }
2888     return root;
2889 dl 1.41 }
2890    
2891 dl 1.102 static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
2892     TreeNode<K,V> p) {
2893     TreeNode<K,V> l, pp, lr;
2894     if (p != null && (l = p.left) != null) {
2895     if ((lr = p.left = l.right) != null)
2896     lr.parent = p;
2897     if ((pp = l.parent = p.parent) == null)
2898     (root = l).red = false;
2899     else if (pp.right == p)
2900     pp.right = l;
2901     else
2902     pp.left = l;
2903     l.right = p;
2904     p.parent = l;
2905     }
2906     return root;
2907     }
2908 dl 1.79
2909 dl 1.102 static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
2910     TreeNode<K,V> x) {
2911     x.red = true;
2912     for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
2913     if ((xp = x.parent) == null) {
2914     x.red = false;
2915     return x;
2916     }
2917     else if (!xp.red || (xpp = xp.parent) == null)
2918     return root;
2919     if (xp == (xppl = xpp.left)) {
2920     if ((xppr = xpp.right) != null && xppr.red) {
2921     xppr.red = false;
2922     xp.red = false;
2923     xpp.red = true;
2924     x = xpp;
2925     }
2926     else {
2927     if (x == xp.right) {
2928     root = rotateLeft(root, x = xp);
2929     xpp = (xp = x.parent) == null ? null : xp.parent;
2930     }
2931     if (xp != null) {
2932     xp.red = false;
2933     if (xpp != null) {
2934     xpp.red = true;
2935     root = rotateRight(root, xpp);
2936     }
2937     }
2938     }
2939     }
2940     else {
2941     if (xppl != null && xppl.red) {
2942     xppl.red = false;
2943     xp.red = false;
2944     xpp.red = true;
2945     x = xpp;
2946     }
2947     else {
2948     if (x == xp.left) {
2949     root = rotateRight(root, x = xp);
2950     xpp = (xp = x.parent) == null ? null : xp.parent;
2951     }
2952     if (xp != null) {
2953     xp.red = false;
2954     if (xpp != null) {
2955     xpp.red = true;
2956     root = rotateLeft(root, xpp);
2957     }
2958     }
2959     }
2960     }
2961     }
2962     }
2963 dl 1.79
2964 dl 1.102 static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
2965     TreeNode<K,V> x) {
2966     for (TreeNode<K,V> xp, xpl, xpr;;) {
2967     if (x == null || x == root)
2968     return root;
2969     else if ((xp = x.parent) == null) {
2970     x.red = false;
2971     return x;
2972     }
2973     else if (x.red) {
2974     x.red = false;
2975     return root;
2976     }
2977     else if ((xpl = xp.left) == x) {
2978     if ((xpr = xp.right) != null && xpr.red) {
2979     xpr.red = false;
2980     xp.red = true;
2981     root = rotateLeft(root, xp);
2982     xpr = (xp = x.parent) == null ? null : xp.right;
2983     }
2984     if (xpr == null)
2985     x = xp;
2986     else {
2987     TreeNode<K,V> sl = xpr.left, sr = xpr.right;
2988     if ((sr == null || !sr.red) &&
2989     (sl == null || !sl.red)) {
2990     xpr.red = true;
2991     x = xp;
2992     }
2993     else {
2994     if (sr == null || !sr.red) {
2995     if (sl != null)
2996     sl.red = false;
2997     xpr.red = true;
2998     root = rotateRight(root, xpr);
2999     xpr = (xp = x.parent) == null ?
3000     null : xp.right;
3001     }
3002     if (xpr != null) {
3003     xpr.red = (xp == null) ? false : xp.red;
3004     if ((sr = xpr.right) != null)
3005     sr.red = false;
3006     }
3007     if (xp != null) {
3008     xp.red = false;
3009     root = rotateLeft(root, xp);
3010     }
3011     x = root;
3012     }
3013     }
3014     }
3015     else { // symmetric
3016     if (xpl != null && xpl.red) {
3017     xpl.red = false;
3018     xp.red = true;
3019     root = rotateRight(root, xp);
3020     xpl = (xp = x.parent) == null ? null : xp.left;
3021     }
3022     if (xpl == null)
3023     x = xp;
3024     else {
3025     TreeNode<K,V> sl = xpl.left, sr = xpl.right;
3026     if ((sl == null || !sl.red) &&
3027     (sr == null || !sr.red)) {
3028     xpl.red = true;
3029     x = xp;
3030     }
3031     else {
3032     if (sl == null || !sl.red) {
3033     if (sr != null)
3034     sr.red = false;
3035     xpl.red = true;
3036     root = rotateLeft(root, xpl);
3037     xpl = (xp = x.parent) == null ?
3038     null : xp.left;
3039     }
3040     if (xpl != null) {
3041     xpl.red = (xp == null) ? false : xp.red;
3042     if ((sl = xpl.left) != null)
3043     sl.red = false;
3044     }
3045     if (xp != null) {
3046     xp.red = false;
3047     root = rotateRight(root, xp);
3048     }
3049     x = root;
3050     }
3051     }
3052     }
3053     }
3054     }
3055 jsr166 1.103
3056 dl 1.79 /**
3057 dl 1.102 * Recursive invariant check
3058 dl 1.79 */
3059 dl 1.102 static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
3060     TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
3061     tb = t.prev, tn = (TreeNode<K,V>)t.next;
3062     if (tb != null && tb.next != t)
3063     return false;
3064     if (tn != null && tn.prev != t)
3065     return false;
3066     if (tp != null && t != tp.left && t != tp.right)
3067     return false;
3068     if (tl != null && (tl.parent != t || tl.hash > t.hash))
3069     return false;
3070     if (tr != null && (tr.parent != t || tr.hash < t.hash))
3071     return false;
3072     if (t.red && tl != null && tl.red && tr != null && tr.red)
3073     return false;
3074     if (tl != null && !checkInvariants(tl))
3075     return false;
3076     if (tr != null && !checkInvariants(tr))
3077     return false;
3078     return true;
3079 dl 1.79 }
3080    
3081 dl 1.102 private static final sun.misc.Unsafe U;
3082     private static final long LOCKSTATE;
3083     static {
3084     try {
3085     U = getUnsafe();
3086     Class<?> k = TreeBin.class;
3087     LOCKSTATE = U.objectFieldOffset
3088     (k.getDeclaredField("lockState"));
3089     } catch (Exception e) {
3090     throw new Error(e);
3091     }
3092     }
3093 dl 1.1 }
3094    
3095 dl 1.102 /* ----------------Table Traversal -------------- */
3096 dl 1.1
    /**
     * Encapsulates traversal for methods such as containsValue; also
     * serves as a base class for other iterators and spliterators.
     *
     * Method advance visits once each still-valid node that was
     * reachable upon iterator construction. It might miss some that
     * were added to a bin after the bin was visited, which is OK wrt
     * consistency guarantees. Maintaining this property in the face
     * of possible ongoing resizes requires a fair amount of
     * bookkeeping state that is difficult to optimize away amidst
     * volatile accesses. Even so, traversal maintains reasonable
     * throughput.
     *
     * Normally, iteration proceeds bin-by-bin traversing lists.
     * However, if the table has been resized, then all future steps
     * must traverse both the bin at the current index as well as at
     * (index + baseSize); and so on for further resizings. To
     * paranoically cope with potential sharing by users of iterators
     * across threads, iteration terminates if a bounds checks fails
     * for a table read.
     */
    static class Traverser<K,V> {
        Node<K,V>[] tab;        // current table; updated if resized
        Node<K,V> next;         // the next entry to use
        int index;              // index of bin to use next
        int baseIndex;          // current index of initial table
        int baseLimit;          // index bound for initial table
        final int baseSize;     // initial table size

        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
            this.tab = tab;
            this.baseSize = size;
            this.baseIndex = this.index = index;
            this.baseLimit = limit;
            this.next = null;
        }

        /**
         * Advances if possible, returning next valid node, or null if none.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;   // continue along the current bin's list
            for (;;) {
                Node<K,V>[] t; int i, n; K ek; // must use locals in checks
                if (e != null)
                    return next = e;
                // Exhausted, or the table vanished/shrank under a
                // (wrongly) shared iterator: terminate defensively.
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                if ((e = tabAt(t, index)) != null && e.hash < 0) {
                    // Special node at bin head: follow forwarders into
                    // the next table, unwrap TreeBins to their node
                    // list, and skip any other special node.
                    if (e instanceof ForwardingNode) {
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first;
                    else
                        e = null;
                }
                if ((index += baseSize) >= n)
                    index = ++baseIndex; // visit upper slots if present
            }
        }
    }
3164    
3165     /**
3166 dl 1.102 * Base of key, value, and entry Iterators. Adds fields to
3167 jsr166 1.105 * Traverser to support iterator.remove.
3168 dl 1.75 */
3169 dl 1.102 static class BaseIterator<K,V> extends Traverser<K,V> {
3170     final ConcurrentHashMapV8<K,V> map;
3171     Node<K,V> lastReturned;
3172     BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
3173     ConcurrentHashMapV8<K,V> map) {
3174     super(tab, size, index, limit);
3175     this.map = map;
3176     advance();
3177     }
3178    
3179     public final boolean hasNext() { return next != null; }
3180     public final boolean hasMoreElements() { return next != null; }
3181    
3182     public final void remove() {
3183     Node<K,V> p;
3184     if ((p = lastReturned) == null)
3185     throw new IllegalStateException();
3186     lastReturned = null;
3187     map.replaceNode(p.key, null, null);
3188     }
3189 dl 1.75 }
3190    
3191 dl 1.102 static final class KeyIterator<K,V> extends BaseIterator<K,V>
3192     implements Iterator<K>, Enumeration<K> {
3193     KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
3194     ConcurrentHashMapV8<K,V> map) {
3195     super(tab, index, size, limit, map);
3196     }
3197    
3198     public final K next() {
3199     Node<K,V> p;
3200     if ((p = next) == null)
3201     throw new NoSuchElementException();
3202     K k = p.key;
3203     lastReturned = p;
3204     advance();
3205     return k;
3206     }
3207    
3208     public final K nextElement() { return next(); }
3209 dl 1.75 }
3210    
3211 dl 1.102 static final class ValueIterator<K,V> extends BaseIterator<K,V>
3212     implements Iterator<V>, Enumeration<V> {
3213     ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
3214     ConcurrentHashMapV8<K,V> map) {
3215     super(tab, index, size, limit, map);
3216     }
3217    
3218     public final V next() {
3219     Node<K,V> p;
3220     if ((p = next) == null)
3221     throw new NoSuchElementException();
3222     V v = p.val;
3223     lastReturned = p;
3224     advance();
3225     return v;
3226 dl 1.84 }
3227 dl 1.102
3228     public final V nextElement() { return next(); }
3229 dl 1.75 }
3230    
3231 dl 1.102 static final class EntryIterator<K,V> extends BaseIterator<K,V>
3232     implements Iterator<Map.Entry<K,V>> {
3233     EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
3234     ConcurrentHashMapV8<K,V> map) {
3235     super(tab, index, size, limit, map);
3236     }
3237    
3238     public final Map.Entry<K,V> next() {
3239     Node<K,V> p;
3240     if ((p = next) == null)
3241     throw new NoSuchElementException();
3242     K k = p.key;
3243     V v = p.val;
3244     lastReturned = p;
3245     advance();
3246     return new MapEntry<K,V>(k, v, map);
3247 dl 1.84 }
3248 dl 1.75 }
3249    
3250     /**
3251 dl 1.102 * Exported Entry for EntryIterator
3252 dl 1.75 */
3253 dl 1.102 static final class MapEntry<K,V> implements Map.Entry<K,V> {
3254     final K key; // non-null
3255     V val; // non-null
3256     final ConcurrentHashMapV8<K,V> map;
3257     MapEntry(K key, V val, ConcurrentHashMapV8<K,V> map) {
3258     this.key = key;
3259     this.val = val;
3260     this.map = map;
3261     }
3262     public K getKey() { return key; }
3263     public V getValue() { return val; }
3264     public int hashCode() { return key.hashCode() ^ val.hashCode(); }
3265     public String toString() { return key + "=" + val; }
3266    
3267     public boolean equals(Object o) {
3268     Object k, v; Map.Entry<?,?> e;
3269     return ((o instanceof Map.Entry) &&
3270     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3271     (v = e.getValue()) != null &&
3272     (k == key || k.equals(key)) &&
3273     (v == val || v.equals(val)));
3274     }
3275    
3276     /**
3277     * Sets our entry's value and writes through to the map. The
3278     * value to return is somewhat arbitrary here. Since we do not
3279     * necessarily track asynchronous changes, the most recent
3280     * "previous" value could be different from what we return (or
3281     * could even have been removed, in which case the put will
3282     * re-establish). We do not and cannot guarantee more.
3283     */
3284     public V setValue(V value) {
3285     if (value == null) throw new NullPointerException();
3286     V v = val;
3287     val = value;
3288     map.put(key, value);
3289     return v;
3290 dl 1.84 }
3291 dl 1.75 }
3292    
3293 dl 1.102 static final class KeySpliterator<K,V> extends Traverser<K,V>
3294     implements ConcurrentHashMapSpliterator<K> {
3295     long est; // size estimate
3296     KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3297     long est) {
3298     super(tab, size, index, limit);
3299     this.est = est;
3300     }
3301    
3302     public ConcurrentHashMapSpliterator<K> trySplit() {
3303     int i, f, h;
3304     return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3305     new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
3306     f, est >>>= 1);
3307     }
3308    
3309     public void forEachRemaining(Action<? super K> action) {
3310     if (action == null) throw new NullPointerException();
3311     for (Node<K,V> p; (p = advance()) != null;)
3312     action.apply(p.key);
3313     }
3314    
3315     public boolean tryAdvance(Action<? super K> action) {
3316     if (action == null) throw new NullPointerException();
3317     Node<K,V> p;
3318     if ((p = advance()) == null)
3319     return false;
3320     action.apply(p.key);
3321     return true;
3322 dl 1.84 }
3323 dl 1.102
3324     public long estimateSize() { return est; }
3325    
3326 dl 1.75 }
3327    
3328 dl 1.102 static final class ValueSpliterator<K,V> extends Traverser<K,V>
3329     implements ConcurrentHashMapSpliterator<V> {
3330     long est; // size estimate
3331     ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
3332     long est) {
3333     super(tab, size, index, limit);
3334     this.est = est;
3335     }
3336    
3337     public ConcurrentHashMapSpliterator<V> trySplit() {
3338     int i, f, h;
3339     return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3340     new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
3341     f, est >>>= 1);
3342     }
3343    
3344     public void forEachRemaining(Action<? super V> action) {
3345     if (action == null) throw new NullPointerException();
3346     for (Node<K,V> p; (p = advance()) != null;)
3347     action.apply(p.val);
3348     }
3349    
3350     public boolean tryAdvance(Action<? super V> action) {
3351     if (action == null) throw new NullPointerException();
3352     Node<K,V> p;
3353     if ((p = advance()) == null)
3354     return false;
3355     action.apply(p.val);
3356     return true;
3357     }
3358    
3359     public long estimateSize() { return est; }
3360    
3361 dl 1.75 }
3362    
3363 dl 1.102 static final class EntrySpliterator<K,V> extends Traverser<K,V>
3364     implements ConcurrentHashMapSpliterator<Map.Entry<K,V>> {
3365     final ConcurrentHashMapV8<K,V> map; // To export MapEntry
3366     long est; // size estimate
3367     EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3368     long est, ConcurrentHashMapV8<K,V> map) {
3369     super(tab, size, index, limit);
3370     this.map = map;
3371     this.est = est;
3372     }
3373    
3374     public ConcurrentHashMapSpliterator<Map.Entry<K,V>> trySplit() {
3375     int i, f, h;
3376     return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3377     new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
3378     f, est >>>= 1, map);
3379     }
3380    
3381     public void forEachRemaining(Action<? super Map.Entry<K,V>> action) {
3382     if (action == null) throw new NullPointerException();
3383     for (Node<K,V> p; (p = advance()) != null; )
3384     action.apply(new MapEntry<K,V>(p.key, p.val, map));
3385     }
3386    
3387     public boolean tryAdvance(Action<? super Map.Entry<K,V>> action) {
3388     if (action == null) throw new NullPointerException();
3389     Node<K,V> p;
3390     if ((p = advance()) == null)
3391     return false;
3392     action.apply(new MapEntry<K,V>(p.key, p.val, map));
3393     return true;
3394     }
3395    
3396     public long estimateSize() { return est; }
3397    
3398 dl 1.75 }
3399    
3400 dl 1.102 // Parallel bulk operations
3401    
3402 dl 1.75 /**
3403 dl 1.102 * Computes initial batch value for bulk tasks. The returned value
3404     * is approximately exp2 of the number of times (minus one) to
3405     * split task by two before executing leaf action. This value is
3406     * faster to compute and more convenient to use as a guide to
3407     * splitting than is the depth, since it is used while dividing by
3408     * two anyway.
3409     */
3410     final int batchFor(long b) {
3411     long n;
3412     if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3413     return 0;
3414     int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3415     return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3416 dl 1.75 }
3417    
3418     /**
3419 dl 1.84 * Performs the given action for each (key, value).
3420     *
3421 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3422     * needed for this operation to be executed in parallel
3423 dl 1.84 * @param action the action
3424 dl 1.102 * @since 1.8
3425 dl 1.75 */
3426 dl 1.102 public void forEach(long parallelismThreshold,
3427     BiAction<? super K,? super V> action) {
3428     if (action == null) throw new NullPointerException();
3429     new ForEachMappingTask<K,V>
3430     (null, batchFor(parallelismThreshold), 0, 0, table,
3431     action).invoke();
3432 dl 1.84 }
3433 dl 1.75
3434 dl 1.84 /**
3435     * Performs the given action for each non-null transformation
3436     * of each (key, value).
3437     *
3438 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3439     * needed for this operation to be executed in parallel
3440 dl 1.84 * @param transformer a function returning the transformation
3441 jsr166 1.91 * for an element, or null if there is no transformation (in
3442 jsr166 1.93 * which case the action is not applied)
3443 dl 1.84 * @param action the action
3444 dl 1.102 * @since 1.8
3445 dl 1.84 */
3446 dl 1.102 public <U> void forEach(long parallelismThreshold,
3447     BiFun<? super K, ? super V, ? extends U> transformer,
3448     Action<? super U> action) {
3449     if (transformer == null || action == null)
3450     throw new NullPointerException();
3451     new ForEachTransformedMappingTask<K,V,U>
3452     (null, batchFor(parallelismThreshold), 0, 0, table,
3453     transformer, action).invoke();
3454 dl 1.84 }
3455 dl 1.75
3456 dl 1.84 /**
3457     * Returns a non-null result from applying the given search
3458     * function on each (key, value), or null if none. Upon
3459     * success, further element processing is suppressed and the
3460     * results of any other parallel invocations of the search
3461     * function are ignored.
3462     *
3463 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3464     * needed for this operation to be executed in parallel
3465 dl 1.84 * @param searchFunction a function returning a non-null
3466     * result on success, else null
3467     * @return a non-null result from applying the given search
3468     * function on each (key, value), or null if none
3469 dl 1.102 * @since 1.8
3470 dl 1.84 */
3471 dl 1.102 public <U> U search(long parallelismThreshold,
3472     BiFun<? super K, ? super V, ? extends U> searchFunction) {
3473     if (searchFunction == null) throw new NullPointerException();
3474     return new SearchMappingsTask<K,V,U>
3475     (null, batchFor(parallelismThreshold), 0, 0, table,
3476     searchFunction, new AtomicReference<U>()).invoke();
3477 dl 1.84 }
3478 dl 1.75
3479 dl 1.84 /**
3480     * Returns the result of accumulating the given transformation
3481     * of all (key, value) pairs using the given reducer to
3482     * combine values, or null if none.
3483     *
3484 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3485     * needed for this operation to be executed in parallel
3486 dl 1.84 * @param transformer a function returning the transformation
3487 jsr166 1.91 * for an element, or null if there is no transformation (in
3488 jsr166 1.93 * which case it is not combined)
3489 dl 1.84 * @param reducer a commutative associative combining function
3490     * @return the result of accumulating the given transformation
3491     * of all (key, value) pairs
3492 dl 1.102 * @since 1.8
3493 dl 1.84 */
3494 dl 1.102 public <U> U reduce(long parallelismThreshold,
3495     BiFun<? super K, ? super V, ? extends U> transformer,
3496     BiFun<? super U, ? super U, ? extends U> reducer) {
3497     if (transformer == null || reducer == null)
3498     throw new NullPointerException();
3499     return new MapReduceMappingsTask<K,V,U>
3500     (null, batchFor(parallelismThreshold), 0, 0, table,
3501     null, transformer, reducer).invoke();
3502 dl 1.84 }
3503 dl 1.75
3504 dl 1.84 /**
3505     * Returns the result of accumulating the given transformation
3506     * of all (key, value) pairs using the given reducer to
3507     * combine values, and the given basis as an identity value.
3508     *
3509 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3510     * needed for this operation to be executed in parallel
3511 dl 1.84 * @param transformer a function returning the transformation
3512     * for an element
3513     * @param basis the identity (initial default value) for the reduction
3514     * @param reducer a commutative associative combining function
3515     * @return the result of accumulating the given transformation
3516     * of all (key, value) pairs
3517 dl 1.102 * @since 1.8
3518 dl 1.84 */
3519 dl 1.107 public double reduceToDouble(long parallelismThreshold,
3520     ObjectByObjectToDouble<? super K, ? super V> transformer,
3521     double basis,
3522     DoubleByDoubleToDouble reducer) {
3523 dl 1.102 if (transformer == null || reducer == null)
3524     throw new NullPointerException();
3525     return new MapReduceMappingsToDoubleTask<K,V>
3526     (null, batchFor(parallelismThreshold), 0, 0, table,
3527     null, transformer, basis, reducer).invoke();
3528 dl 1.84 }
3529    
3530     /**
3531     * Returns the result of accumulating the given transformation
3532     * of all (key, value) pairs using the given reducer to
3533     * combine values, and the given basis as an identity value.
3534     *
3535 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3536     * needed for this operation to be executed in parallel
3537 dl 1.84 * @param transformer a function returning the transformation
3538     * for an element
3539     * @param basis the identity (initial default value) for the reduction
3540     * @param reducer a commutative associative combining function
3541     * @return the result of accumulating the given transformation
3542     * of all (key, value) pairs
3543 dl 1.102 * @since 1.8
3544 dl 1.84 */
3545 dl 1.102 public long reduceToLong(long parallelismThreshold,
3546     ObjectByObjectToLong<? super K, ? super V> transformer,
3547     long basis,
3548     LongByLongToLong reducer) {
3549     if (transformer == null || reducer == null)
3550     throw new NullPointerException();
3551     return new MapReduceMappingsToLongTask<K,V>
3552     (null, batchFor(parallelismThreshold), 0, 0, table,
3553     null, transformer, basis, reducer).invoke();
3554 dl 1.84 }
3555    
3556     /**
3557     * Returns the result of accumulating the given transformation
3558     * of all (key, value) pairs using the given reducer to
3559     * combine values, and the given basis as an identity value.
3560     *
3561 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3562     * needed for this operation to be executed in parallel
3563 dl 1.84 * @param transformer a function returning the transformation
3564     * for an element
3565     * @param basis the identity (initial default value) for the reduction
3566     * @param reducer a commutative associative combining function
3567     * @return the result of accumulating the given transformation
3568     * of all (key, value) pairs
3569 dl 1.102 * @since 1.8
3570 dl 1.84 */
3571 dl 1.102 public int reduceToInt(long parallelismThreshold,
3572     ObjectByObjectToInt<? super K, ? super V> transformer,
3573     int basis,
3574     IntByIntToInt reducer) {
3575     if (transformer == null || reducer == null)
3576     throw new NullPointerException();
3577     return new MapReduceMappingsToIntTask<K,V>
3578     (null, batchFor(parallelismThreshold), 0, 0, table,
3579     null, transformer, basis, reducer).invoke();
3580 dl 1.84 }
3581    
3582     /**
3583     * Performs the given action for each key.
3584     *
3585 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3586     * needed for this operation to be executed in parallel
3587 dl 1.84 * @param action the action
3588 dl 1.102 * @since 1.8
3589 dl 1.84 */
3590 dl 1.102 public void forEachKey(long parallelismThreshold,
3591     Action<? super K> action) {
3592     if (action == null) throw new NullPointerException();
3593     new ForEachKeyTask<K,V>
3594     (null, batchFor(parallelismThreshold), 0, 0, table,
3595     action).invoke();
3596 dl 1.84 }
3597    
3598     /**
3599     * Performs the given action for each non-null transformation
3600     * of each key.
3601     *
3602 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3603     * needed for this operation to be executed in parallel
3604 dl 1.84 * @param transformer a function returning the transformation
3605 jsr166 1.91 * for an element, or null if there is no transformation (in
3606 jsr166 1.93 * which case the action is not applied)
3607 dl 1.84 * @param action the action
3608 dl 1.102 * @since 1.8
3609 dl 1.84 */
3610 dl 1.102 public <U> void forEachKey(long parallelismThreshold,
3611     Fun<? super K, ? extends U> transformer,
3612     Action<? super U> action) {
3613     if (transformer == null || action == null)
3614     throw new NullPointerException();
3615     new ForEachTransformedKeyTask<K,V,U>
3616     (null, batchFor(parallelismThreshold), 0, 0, table,
3617     transformer, action).invoke();
3618 dl 1.84 }
3619    
3620     /**
3621     * Returns a non-null result from applying the given search
3622     * function on each key, or null if none. Upon success,
3623     * further element processing is suppressed and the results of
3624     * any other parallel invocations of the search function are
3625     * ignored.
3626     *
3627 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3628     * needed for this operation to be executed in parallel
3629 dl 1.84 * @param searchFunction a function returning a non-null
3630     * result on success, else null
3631     * @return a non-null result from applying the given search
3632     * function on each key, or null if none
3633 dl 1.102 * @since 1.8
3634 dl 1.84 */
3635 dl 1.102 public <U> U searchKeys(long parallelismThreshold,
3636     Fun<? super K, ? extends U> searchFunction) {
3637     if (searchFunction == null) throw new NullPointerException();
3638     return new SearchKeysTask<K,V,U>
3639     (null, batchFor(parallelismThreshold), 0, 0, table,
3640     searchFunction, new AtomicReference<U>()).invoke();
3641 dl 1.84 }
3642    
3643     /**
3644     * Returns the result of accumulating all keys using the given
3645     * reducer to combine values, or null if none.
3646     *
3647 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3648     * needed for this operation to be executed in parallel
3649 dl 1.84 * @param reducer a commutative associative combining function
3650     * @return the result of accumulating all keys using the given
3651     * reducer to combine values, or null if none
3652 dl 1.102 * @since 1.8
3653 dl 1.84 */
3654 dl 1.102 public K reduceKeys(long parallelismThreshold,
3655     BiFun<? super K, ? super K, ? extends K> reducer) {
3656     if (reducer == null) throw new NullPointerException();
3657     return new ReduceKeysTask<K,V>
3658     (null, batchFor(parallelismThreshold), 0, 0, table,
3659     null, reducer).invoke();
3660 dl 1.84 }
3661    
3662     /**
3663     * Returns the result of accumulating the given transformation
3664     * of all keys using the given reducer to combine values, or
3665     * null if none.
3666     *
3667 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3668     * needed for this operation to be executed in parallel
3669 dl 1.84 * @param transformer a function returning the transformation
3670 jsr166 1.91 * for an element, or null if there is no transformation (in
3671 jsr166 1.93 * which case it is not combined)
3672 dl 1.84 * @param reducer a commutative associative combining function
3673     * @return the result of accumulating the given transformation
3674     * of all keys
3675 dl 1.102 * @since 1.8
3676 dl 1.84 */
3677 dl 1.102 public <U> U reduceKeys(long parallelismThreshold,
3678     Fun<? super K, ? extends U> transformer,
3679 dl 1.84 BiFun<? super U, ? super U, ? extends U> reducer) {
3680 dl 1.102 if (transformer == null || reducer == null)
3681     throw new NullPointerException();
3682     return new MapReduceKeysTask<K,V,U>
3683     (null, batchFor(parallelismThreshold), 0, 0, table,
3684     null, transformer, reducer).invoke();
3685 dl 1.84 }
3686    
3687     /**
3688     * Returns the result of accumulating the given transformation
3689     * of all keys using the given reducer to combine values, and
3690     * the given basis as an identity value.
3691     *
3692 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3693     * needed for this operation to be executed in parallel
3694 dl 1.84 * @param transformer a function returning the transformation
3695     * for an element
3696     * @param basis the identity (initial default value) for the reduction
3697     * @param reducer a commutative associative combining function
3698 dl 1.102 * @return the result of accumulating the given transformation
3699 dl 1.84 * of all keys
3700 dl 1.102 * @since 1.8
3701 dl 1.84 */
3702 dl 1.102 public double reduceKeysToDouble(long parallelismThreshold,
3703     ObjectToDouble<? super K> transformer,
3704     double basis,
3705     DoubleByDoubleToDouble reducer) {
3706     if (transformer == null || reducer == null)
3707     throw new NullPointerException();
3708     return new MapReduceKeysToDoubleTask<K,V>
3709     (null, batchFor(parallelismThreshold), 0, 0, table,
3710     null, transformer, basis, reducer).invoke();
3711 dl 1.84 }
3712    
3713     /**
3714     * Returns the result of accumulating the given transformation
3715     * of all keys using the given reducer to combine values, and
3716     * the given basis as an identity value.
3717     *
3718 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3719     * needed for this operation to be executed in parallel
3720 dl 1.84 * @param transformer a function returning the transformation
3721     * for an element
3722     * @param basis the identity (initial default value) for the reduction
3723     * @param reducer a commutative associative combining function
3724     * @return the result of accumulating the given transformation
3725     * of all keys
3726 dl 1.102 * @since 1.8
3727 dl 1.84 */
3728 dl 1.102 public long reduceKeysToLong(long parallelismThreshold,
3729     ObjectToLong<? super K> transformer,
3730     long basis,
3731     LongByLongToLong reducer) {
3732     if (transformer == null || reducer == null)
3733     throw new NullPointerException();
3734     return new MapReduceKeysToLongTask<K,V>
3735     (null, batchFor(parallelismThreshold), 0, 0, table,
3736     null, transformer, basis, reducer).invoke();
3737 dl 1.84 }
3738    
3739     /**
3740     * Returns the result of accumulating the given transformation
3741     * of all keys using the given reducer to combine values, and
3742     * the given basis as an identity value.
3743     *
3744 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3745     * needed for this operation to be executed in parallel
3746 dl 1.84 * @param transformer a function returning the transformation
3747     * for an element
3748     * @param basis the identity (initial default value) for the reduction
3749     * @param reducer a commutative associative combining function
3750     * @return the result of accumulating the given transformation
3751     * of all keys
3752 dl 1.102 * @since 1.8
3753 dl 1.84 */
3754 dl 1.102 public int reduceKeysToInt(long parallelismThreshold,
3755     ObjectToInt<? super K> transformer,
3756     int basis,
3757     IntByIntToInt reducer) {
3758     if (transformer == null || reducer == null)
3759     throw new NullPointerException();
3760     return new MapReduceKeysToIntTask<K,V>
3761     (null, batchFor(parallelismThreshold), 0, 0, table,
3762     null, transformer, basis, reducer).invoke();
3763 dl 1.84 }
3764    
3765     /**
3766     * Performs the given action for each value.
3767     *
3768 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3769     * needed for this operation to be executed in parallel
3770 dl 1.84 * @param action the action
3771 dl 1.102 * @since 1.8
3772 dl 1.84 */
3773 dl 1.102 public void forEachValue(long parallelismThreshold,
3774     Action<? super V> action) {
3775     if (action == null)
3776     throw new NullPointerException();
3777     new ForEachValueTask<K,V>
3778     (null, batchFor(parallelismThreshold), 0, 0, table,
3779     action).invoke();
3780 dl 1.84 }
3781    
3782     /**
3783     * Performs the given action for each non-null transformation
3784     * of each value.
3785     *
3786 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3787     * needed for this operation to be executed in parallel
3788 dl 1.84 * @param transformer a function returning the transformation
3789 jsr166 1.91 * for an element, or null if there is no transformation (in
3790 jsr166 1.93 * which case the action is not applied)
3791 dl 1.102 * @param action the action
3792     * @since 1.8
3793 dl 1.84 */
3794 dl 1.102 public <U> void forEachValue(long parallelismThreshold,
3795     Fun<? super V, ? extends U> transformer,
3796     Action<? super U> action) {
3797     if (transformer == null || action == null)
3798     throw new NullPointerException();
3799     new ForEachTransformedValueTask<K,V,U>
3800     (null, batchFor(parallelismThreshold), 0, 0, table,
3801     transformer, action).invoke();
3802 dl 1.84 }
3803    
3804     /**
3805     * Returns a non-null result from applying the given search
3806     * function on each value, or null if none. Upon success,
3807     * further element processing is suppressed and the results of
3808     * any other parallel invocations of the search function are
3809     * ignored.
3810     *
3811 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3812     * needed for this operation to be executed in parallel
3813 dl 1.84 * @param searchFunction a function returning a non-null
3814     * result on success, else null
3815     * @return a non-null result from applying the given search
3816     * function on each value, or null if none
3817 dl 1.102 * @since 1.8
3818 dl 1.84 */
3819 dl 1.102 public <U> U searchValues(long parallelismThreshold,
3820     Fun<? super V, ? extends U> searchFunction) {
3821     if (searchFunction == null) throw new NullPointerException();
3822     return new SearchValuesTask<K,V,U>
3823     (null, batchFor(parallelismThreshold), 0, 0, table,
3824     searchFunction, new AtomicReference<U>()).invoke();
3825 dl 1.84 }
3826    
3827     /**
3828     * Returns the result of accumulating all values using the
3829     * given reducer to combine values, or null if none.
3830     *
3831 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3832     * needed for this operation to be executed in parallel
3833 dl 1.84 * @param reducer a commutative associative combining function
3834 dl 1.102 * @return the result of accumulating all values
3835     * @since 1.8
3836 dl 1.84 */
3837 dl 1.102 public V reduceValues(long parallelismThreshold,
3838     BiFun<? super V, ? super V, ? extends V> reducer) {
3839     if (reducer == null) throw new NullPointerException();
3840     return new ReduceValuesTask<K,V>
3841     (null, batchFor(parallelismThreshold), 0, 0, table,
3842     null, reducer).invoke();
3843 dl 1.84 }
3844    
3845     /**
3846     * Returns the result of accumulating the given transformation
3847     * of all values using the given reducer to combine values, or
3848     * null if none.
3849     *
3850 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3851     * needed for this operation to be executed in parallel
3852 dl 1.84 * @param transformer a function returning the transformation
3853 jsr166 1.91 * for an element, or null if there is no transformation (in
3854 jsr166 1.93 * which case it is not combined)
3855 dl 1.84 * @param reducer a commutative associative combining function
3856     * @return the result of accumulating the given transformation
3857     * of all values
3858 dl 1.102 * @since 1.8
3859 dl 1.84 */
3860 dl 1.102 public <U> U reduceValues(long parallelismThreshold,
3861     Fun<? super V, ? extends U> transformer,
3862     BiFun<? super U, ? super U, ? extends U> reducer) {
3863     if (transformer == null || reducer == null)
3864     throw new NullPointerException();
3865     return new MapReduceValuesTask<K,V,U>
3866     (null, batchFor(parallelismThreshold), 0, 0, table,
3867     null, transformer, reducer).invoke();
3868 dl 1.84 }
3869    
3870     /**
3871     * Returns the result of accumulating the given transformation
3872     * of all values using the given reducer to combine values,
3873     * and the given basis as an identity value.
3874     *
3875 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3876     * needed for this operation to be executed in parallel
3877 dl 1.84 * @param transformer a function returning the transformation
3878     * for an element
3879     * @param basis the identity (initial default value) for the reduction
3880     * @param reducer a commutative associative combining function
3881     * @return the result of accumulating the given transformation
3882     * of all values
3883 dl 1.102 * @since 1.8
3884 dl 1.84 */
3885 dl 1.102 public double reduceValuesToDouble(long parallelismThreshold,
3886     ObjectToDouble<? super V> transformer,
3887     double basis,
3888     DoubleByDoubleToDouble reducer) {
3889     if (transformer == null || reducer == null)
3890     throw new NullPointerException();
3891     return new MapReduceValuesToDoubleTask<K,V>
3892     (null, batchFor(parallelismThreshold), 0, 0, table,
3893     null, transformer, basis, reducer).invoke();
3894 dl 1.84 }
3895    
3896     /**
3897     * Returns the result of accumulating the given transformation
3898     * of all values using the given reducer to combine values,
3899     * and the given basis as an identity value.
3900     *
3901 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3902     * needed for this operation to be executed in parallel
3903 dl 1.84 * @param transformer a function returning the transformation
3904     * for an element
3905     * @param basis the identity (initial default value) for the reduction
3906     * @param reducer a commutative associative combining function
3907     * @return the result of accumulating the given transformation
3908     * of all values
3909 dl 1.102 * @since 1.8
3910 dl 1.84 */
3911 dl 1.102 public long reduceValuesToLong(long parallelismThreshold,
3912     ObjectToLong<? super V> transformer,
3913     long basis,
3914     LongByLongToLong reducer) {
3915     if (transformer == null || reducer == null)
3916     throw new NullPointerException();
3917     return new MapReduceValuesToLongTask<K,V>
3918     (null, batchFor(parallelismThreshold), 0, 0, table,
3919     null, transformer, basis, reducer).invoke();
3920 dl 1.84 }
3921    
3922     /**
3923     * Returns the result of accumulating the given transformation
3924     * of all values using the given reducer to combine values,
3925     * and the given basis as an identity value.
3926     *
3927 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3928     * needed for this operation to be executed in parallel
3929 dl 1.84 * @param transformer a function returning the transformation
3930     * for an element
3931     * @param basis the identity (initial default value) for the reduction
3932     * @param reducer a commutative associative combining function
3933     * @return the result of accumulating the given transformation
3934     * of all values
3935 dl 1.102 * @since 1.8
3936 dl 1.84 */
3937 dl 1.102 public int reduceValuesToInt(long parallelismThreshold,
3938     ObjectToInt<? super V> transformer,
3939     int basis,
3940     IntByIntToInt reducer) {
3941     if (transformer == null || reducer == null)
3942     throw new NullPointerException();
3943     return new MapReduceValuesToIntTask<K,V>
3944     (null, batchFor(parallelismThreshold), 0, 0, table,
3945     null, transformer, basis, reducer).invoke();
3946 dl 1.84 }
3947    
3948     /**
3949     * Performs the given action for each entry.
3950     *
3951 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3952     * needed for this operation to be executed in parallel
3953 dl 1.84 * @param action the action
3954 dl 1.102 * @since 1.8
3955 dl 1.84 */
3956 dl 1.102 public void forEachEntry(long parallelismThreshold,
3957     Action<? super Map.Entry<K,V>> action) {
3958     if (action == null) throw new NullPointerException();
3959     new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
3960     action).invoke();
3961 dl 1.84 }
3962    
3963     /**
3964     * Performs the given action for each non-null transformation
3965     * of each entry.
3966     *
3967 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3968     * needed for this operation to be executed in parallel
3969 dl 1.84 * @param transformer a function returning the transformation
3970 jsr166 1.91 * for an element, or null if there is no transformation (in
3971 jsr166 1.93 * which case the action is not applied)
3972 dl 1.84 * @param action the action
3973 dl 1.102 * @since 1.8
3974 dl 1.84 */
3975 dl 1.102 public <U> void forEachEntry(long parallelismThreshold,
3976     Fun<Map.Entry<K,V>, ? extends U> transformer,
3977     Action<? super U> action) {
3978     if (transformer == null || action == null)
3979     throw new NullPointerException();
3980     new ForEachTransformedEntryTask<K,V,U>
3981     (null, batchFor(parallelismThreshold), 0, 0, table,
3982     transformer, action).invoke();
3983 dl 1.84 }
3984    
3985     /**
3986     * Returns a non-null result from applying the given search
3987     * function on each entry, or null if none. Upon success,
3988     * further element processing is suppressed and the results of
3989     * any other parallel invocations of the search function are
3990     * ignored.
3991     *
3992 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3993     * needed for this operation to be executed in parallel
3994 dl 1.84 * @param searchFunction a function returning a non-null
3995     * result on success, else null
3996     * @return a non-null result from applying the given search
3997     * function on each entry, or null if none
3998 dl 1.102 * @since 1.8
3999 dl 1.84 */
4000 dl 1.102 public <U> U searchEntries(long parallelismThreshold,
4001     Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
4002     if (searchFunction == null) throw new NullPointerException();
4003     return new SearchEntriesTask<K,V,U>
4004     (null, batchFor(parallelismThreshold), 0, 0, table,
4005     searchFunction, new AtomicReference<U>()).invoke();
4006 dl 1.84 }
4007    
4008     /**
4009     * Returns the result of accumulating all entries using the
4010     * given reducer to combine values, or null if none.
4011     *
4012 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
4013     * needed for this operation to be executed in parallel
4014 dl 1.84 * @param reducer a commutative associative combining function
4015     * @return the result of accumulating all entries
4016 dl 1.102 * @since 1.8
4017 dl 1.84 */
4018 dl 1.102 public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4019     BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4020     if (reducer == null) throw new NullPointerException();
4021     return new ReduceEntriesTask<K,V>
4022     (null, batchFor(parallelismThreshold), 0, 0, table,
4023     null, reducer).invoke();
4024 dl 1.84 }
4025    
4026     /**
4027     * Returns the result of accumulating the given transformation
4028     * of all entries using the given reducer to combine values,
4029     * or null if none.
4030     *
4031 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
4032     * needed for this operation to be executed in parallel
4033 dl 1.84 * @param transformer a function returning the transformation
4034 jsr166 1.91 * for an element, or null if there is no transformation (in
4035 jsr166 1.93 * which case it is not combined)
4036 dl 1.84 * @param reducer a commutative associative combining function
4037     * @return the result of accumulating the given transformation
4038     * of all entries
4039 dl 1.102 * @since 1.8
4040 dl 1.84 */
4041 dl 1.102 public <U> U reduceEntries(long parallelismThreshold,
4042     Fun<Map.Entry<K,V>, ? extends U> transformer,
4043     BiFun<? super U, ? super U, ? extends U> reducer) {
4044     if (transformer == null || reducer == null)
4045     throw new NullPointerException();
4046     return new MapReduceEntriesTask<K,V,U>
4047     (null, batchFor(parallelismThreshold), 0, 0, table,
4048     null, transformer, reducer).invoke();
4049 dl 1.84 }
4050    
4051     /**
4052     * Returns the result of accumulating the given transformation
4053     * of all entries using the given reducer to combine values,
4054     * and the given basis as an identity value.
4055     *
4056 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
4057     * needed for this operation to be executed in parallel
4058 dl 1.84 * @param transformer a function returning the transformation
4059     * for an element
4060     * @param basis the identity (initial default value) for the reduction
4061     * @param reducer a commutative associative combining function
4062     * @return the result of accumulating the given transformation
4063     * of all entries
4064 dl 1.102 * @since 1.8
4065 dl 1.84 */
4066 dl 1.102 public double reduceEntriesToDouble(long parallelismThreshold,
4067     ObjectToDouble<Map.Entry<K,V>> transformer,
4068     double basis,
4069     DoubleByDoubleToDouble reducer) {
4070     if (transformer == null || reducer == null)
4071     throw new NullPointerException();
4072     return new MapReduceEntriesToDoubleTask<K,V>
4073     (null, batchFor(parallelismThreshold), 0, 0, table,
4074     null, transformer, basis, reducer).invoke();
4075 dl 1.84 }
4076    
4077     /**
4078     * Returns the result of accumulating the given transformation
4079     * of all entries using the given reducer to combine values,
4080     * and the given basis as an identity value.
4081     *
4082 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
4083     * needed for this operation to be executed in parallel
4084 dl 1.84 * @param transformer a function returning the transformation
4085     * for an element
4086     * @param basis the identity (initial default value) for the reduction
4087     * @param reducer a commutative associative combining function
4088 dl 1.102 * @return the result of accumulating the given transformation
4089 dl 1.84 * of all entries
4090 dl 1.102 * @since 1.8
4091 dl 1.84 */
4092 dl 1.102 public long reduceEntriesToLong(long parallelismThreshold,
4093     ObjectToLong<Map.Entry<K,V>> transformer,
4094     long basis,
4095     LongByLongToLong reducer) {
4096     if (transformer == null || reducer == null)
4097     throw new NullPointerException();
4098     return new MapReduceEntriesToLongTask<K,V>
4099     (null, batchFor(parallelismThreshold), 0, 0, table,
4100     null, transformer, basis, reducer).invoke();
4101 dl 1.84 }
4102    
4103     /**
4104     * Returns the result of accumulating the given transformation
4105     * of all entries using the given reducer to combine values,
4106     * and the given basis as an identity value.
4107     *
4108 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
4109     * needed for this operation to be executed in parallel
4110 dl 1.84 * @param transformer a function returning the transformation
4111     * for an element
4112     * @param basis the identity (initial default value) for the reduction
4113     * @param reducer a commutative associative combining function
4114     * @return the result of accumulating the given transformation
4115     * of all entries
4116 dl 1.102 * @since 1.8
4117 dl 1.84 */
4118 dl 1.102 public int reduceEntriesToInt(long parallelismThreshold,
4119     ObjectToInt<Map.Entry<K,V>> transformer,
4120     int basis,
4121     IntByIntToInt reducer) {
4122     if (transformer == null || reducer == null)
4123     throw new NullPointerException();
4124     return new MapReduceEntriesToIntTask<K,V>
4125     (null, batchFor(parallelismThreshold), 0, 0, table,
4126     null, transformer, basis, reducer).invoke();
4127 dl 1.84 }
4128    
4129    
4130     /* ----------------Views -------------- */
4131    
    /**
     * Base class for views.  Wraps the backing map and implements the
     * Collection operations that can be expressed purely in terms of the
     * map plus the abstract iterator/contains/remove supplied by concrete
     * subclasses (KeySetView, ValuesView, EntrySetView).
     */
    abstract static class CollectionView<K,V,E>
        implements Collection<E>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        final ConcurrentHashMapV8<K,V> map;    // the backing map; all operations delegate to it
        CollectionView(ConcurrentHashMapV8<K,V> map)  { this.map = map; }

        /**
         * Returns the map backing this view.
         *
         * @return the map backing this view
         */
        public ConcurrentHashMapV8<K,V> getMap() { return map; }

        /**
         * Removes all of the elements from this view, by removing all
         * the mappings from the map backing this view.
         */
        public final void clear()      { map.clear(); }
        public final int size()        { return map.size(); }
        public final boolean isEmpty() { return map.isEmpty(); }

        // implementations below rely on concrete classes supplying these
        // abstract methods
        /**
         * Returns a "weakly consistent" iterator that will never
         * throw {@link ConcurrentModificationException}, and
         * guarantees to traverse elements as they existed upon
         * construction of the iterator, and may (but is not
         * guaranteed to) reflect any modifications subsequent to
         * construction.
         */
        public abstract Iterator<E> iterator();
        public abstract boolean contains(Object o);
        public abstract boolean remove(Object o);

        private static final String oomeMsg = "Required array size too large";

        public final Object[] toArray() {
            // Size the array from mappingCount(), but be prepared for the
            // map to grow concurrently while we iterate: if the array fills,
            // grow it by ~50% (capped at MAX_ARRAY_SIZE) and keep copying.
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(oomeMsg);
            int n = (int)sz;
            Object[] r = new Object[n];
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = e;
            }
            // Trim if the map shrank (or the estimate was high) during iteration.
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        @SuppressWarnings("unchecked")
        public final <T> T[] toArray(T[] a) {
            // Same concurrent-growth strategy as toArray(), but reusing the
            // caller's array when it is large enough, per the Collection spec.
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(oomeMsg);
            int m = (int)sz;
            T[] r = (a.length >= m) ? a :
                (T[])java.lang.reflect.Array
                .newInstance(a.getClass().getComponentType(), m);
            int n = r.length;
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = (T)e;
            }
            if (a == r && i < n) {
                r[i] = null; // null-terminate
                return r;
            }
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        /**
         * Returns a string representation of this collection.
         * The string representation consists of the string representations
         * of the collection's elements in the order they are returned by
         * its iterator, enclosed in square brackets ({@code "[]"}).
         * Adjacent elements are separated by the characters {@code ", "}
         * (comma and space).  Elements are converted to strings as by
         * {@link String#valueOf(Object)}.
         *
         * @return a string representation of this collection
         */
        public final String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append('[');
            Iterator<E> it = iterator();
            if (it.hasNext()) {
                for (;;) {
                    Object e = it.next();
                    // Guard against self-referential collections.
                    sb.append(e == this ? "(this Collection)" : e);
                    if (!it.hasNext())
                        break;
                    sb.append(',').append(' ');
                }
            }
            return sb.append(']').toString();
        }

        public final boolean containsAll(Collection<?> c) {
            if (c != this) {
                for (Object e : c) {
                    // null elements can never be contained; also screens
                    // contains(null) which would otherwise throw.
                    if (e == null || !contains(e))
                        return false;
                }
            }
            return true;
        }

        public final boolean removeAll(Collection<?> c) {
            boolean modified = false;
            // Iterator.remove is the only safe way to remove during traversal.
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

        public final boolean retainAll(Collection<?> c) {
            boolean modified = false;
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (!c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

    }
4284    
    /**
     * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in
     * which additions may optionally be enabled by mapping to a
     * common value.  This class cannot be directly instantiated.
     * See {@link #keySet() keySet()},
     * {@link #keySet(Object) keySet(V)},
     * {@link #newKeySet() newKeySet()},
     * {@link #newKeySet(int) newKeySet(int)}.
     *
     * @since 1.8
     */
    public static class KeySetView<K,V> extends CollectionView<K,V,K>
        implements Set<K>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        // Default value used for add()/addAll(); null means additions disabled.
        private final V value;
        KeySetView(ConcurrentHashMapV8<K,V> map, V value) {  // non-public
            super(map);
            this.value = value;
        }

        /**
         * Returns the default mapped value for additions,
         * or {@code null} if additions are not supported.
         *
         * @return the default mapped value for additions, or {@code null}
         * if not supported
         */
        public V getMappedValue() { return value; }

        /**
         * {@inheritDoc}
         * @throws NullPointerException if the specified key is null
         */
        public boolean contains(Object o) { return map.containsKey(o); }

        /**
         * Removes the key from this map view, by removing the key (and its
         * corresponding value) from the backing map.  This method does
         * nothing if the key is not in the map.
         *
         * @param  o the key to be removed from the backing map
         * @return {@code true} if the backing map contained the specified key
         * @throws NullPointerException if the specified key is null
         */
        public boolean remove(Object o) { return map.remove(o) != null; }

        /**
         * @return an iterator over the keys of the backing map
         */
        public Iterator<K> iterator() {
            Node<K,V>[] t;
            ConcurrentHashMapV8<K,V> m = map;
            // Snapshot the table reference once; a null table means empty.
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeyIterator<K,V>(t, f, 0, f, m);
        }

        /**
         * Adds the specified key to this set view by mapping the key to
         * the default mapped value in the backing map, if defined.
         *
         * @param e key to be added
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the specified key is null
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean add(K e) {
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            // putVal(..., true) only inserts if absent, matching Set.add semantics.
            return map.putVal(e, v, true) == null;
        }

        /**
         * Adds all of the elements in the specified collection to this set,
         * as if by calling {@link #add} on each one.
         *
         * @param c the elements to be inserted into this set
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the collection or any of its
         * elements are {@code null}
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean addAll(Collection<? extends K> c) {
            boolean added = false;
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            for (K e : c) {
                if (map.putVal(e, v, true) == null)
                    added = true;
            }
            return added;
        }

        public int hashCode() {
            // Per Set contract: sum of element hash codes.
            int h = 0;
            for (K e : this)
                h += e.hashCode();
            return h;
        }

        public boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public ConcurrentHashMapSpliterator<K> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMapV8<K,V> m = map;
            // sumCount() may transiently be negative; clamp to 0 for the estimate.
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Action<? super K> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.apply(p.key);
            }
        }
    }
4413    
    /**
     * A view of a ConcurrentHashMapV8 as a {@link Collection} of
     * values, in which additions are disabled.  This class cannot be
     * directly instantiated.  See {@link #values()}.
     */
    static final class ValuesView<K,V> extends CollectionView<K,V,V>
        implements Collection<V>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        ValuesView(ConcurrentHashMapV8<K,V> map) { super(map); }
        public final boolean contains(Object o) {
            return map.containsValue(o);
        }

        public final boolean remove(Object o) {
            // Removes at most one mapping with a value equal to o;
            // a null argument is never present, so return false.
            if (o != null) {
                for (Iterator<V> it = iterator(); it.hasNext();) {
                    if (o.equals(it.next())) {
                        it.remove();
                        return true;
                    }
                }
            }
            return false;
        }

        public final Iterator<V> iterator() {
            ConcurrentHashMapV8<K,V> m = map;
            Node<K,V>[] t;
            // Snapshot the table reference once; a null table means empty.
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueIterator<K,V>(t, f, 0, f, m);
        }

        // Values cannot be added without a key, so additions are unsupported.
        public final boolean add(V e) {
            throw new UnsupportedOperationException();
        }
        public final boolean addAll(Collection<? extends V> c) {
            throw new UnsupportedOperationException();
        }

        public ConcurrentHashMapSpliterator<V> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMapV8<K,V> m = map;
            // sumCount() may transiently be negative; clamp to 0 for the estimate.
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Action<? super V> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.apply(p.val);
            }
        }
    }
4471 dl 1.52
    /**
     * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value)
     * entries.  This class cannot be directly instantiated.  See
     * {@link #entrySet()}.
     */
    static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
        implements Set<Map.Entry<K,V>>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        EntrySetView(ConcurrentHashMapV8<K,V> map) { super(map); }

        public boolean contains(Object o) {
            Object k, v, r; Map.Entry<?,?> e;
            // True only if the map currently maps the entry's key to an
            // equal value; null keys/values are never present.
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (r = map.get(k)) != null &&
                    (v = e.getValue()) != null &&
                    (v == r || v.equals(r)));
        }

        public boolean remove(Object o) {
            Object k, v; Map.Entry<?,?> e;
            // Two-arg remove: only removes when key still maps to this value.
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    map.remove(k, v));
        }

        /**
         * @return an iterator over the entries of the backing map
         */
        public Iterator<Map.Entry<K,V>> iterator() {
            ConcurrentHashMapV8<K,V> m = map;
            Node<K,V>[] t;
            // Snapshot the table reference once; a null table means empty.
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntryIterator<K,V>(t, f, 0, f, m);
        }

        public boolean add(Entry<K,V> e) {
            // putVal(..., false) overwrites existing mappings; returns true
            // if the key was previously absent.
            return map.putVal(e.getKey(), e.getValue(), false) == null;
        }

        public boolean addAll(Collection<? extends Entry<K,V>> c) {
            boolean added = false;
            for (Entry<K,V> e : c) {
                if (add(e))
                    added = true;
            }
            return added;
        }

        public final int hashCode() {
            // Per Set contract: sum of entry hash codes, traversed directly
            // over the table nodes.
            int h = 0;
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; ) {
                    h += p.hashCode();
                }
            }
            return h;
        }

        public final boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public ConcurrentHashMapSpliterator<Map.Entry<K,V>> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMapV8<K,V> m = map;
            // sumCount() may transiently be negative; clamp to 0 for the estimate.
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
        }

        public void forEach(Action<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    // Wrap in MapEntry so setValue writes through to the map.
                    action.apply(new MapEntry<K,V>(p.key, p.val, map));
            }
        }

    }
4560 dl 1.52
4561 dl 1.102 // -------------------------------------------------------
4562 dl 1.52
    /**
     * Base class for bulk tasks.  Repeats some fields and code from
     * class Traverser, because we need to subclass CountedCompleter.
     * Subtasks share the same table snapshot and split the index range
     * [baseIndex, baseLimit) between themselves.
     */
    abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
        Node<K,V>[] tab;        // same as Traverser
        Node<K,V> next;         // next node to return from advance()
        int index;              // current table slot
        int baseIndex;          // current index of initial table
        int baseLimit;          // one past the last slot this task covers
        final int baseSize;     // initial table size, fixed for all subtasks
        int batch;              // split control

        /**
         * @param par parent task, or null for the root
         * @param b   batch (split) budget for this task
         * @param i   starting slot index
         * @param f   slot limit (ignored for the root, which covers the whole table)
         * @param t   table snapshot to traverse
         */
        BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
            super(par);
            this.batch = b;
            this.index = this.baseIndex = i;
            if ((this.tab = t) == null)
                this.baseSize = this.baseLimit = 0;     // empty table: nothing to do
            else if (par == null)
                this.baseSize = this.baseLimit = t.length;  // root spans the whole table
            else {
                this.baseLimit = f;
                this.baseSize = par.baseSize;
            }
        }

        /**
         * Same as Traverser version.  Returns the next node in the
         * traversal, or null when this task's range is exhausted.
         * Follows ForwardingNodes into the next table during a resize
         * and descends into TreeBin node lists.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;
            for (;;) {
                Node<K,V>[] t; int i, n; K ek; // must use locals in checks
                if (e != null)
                    return next = e;
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                // Negative hash marks a special node (forwarding / tree bin).
                if ((e = tabAt(t, index)) != null && e.hash < 0) {
                    if (e instanceof ForwardingNode) {
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first;
                    else
                        e = null;
                }
                if ((index += baseSize) >= n)
                    index = ++baseIndex;    // visit upper slots if present
            }
        }
    }
4620    
    /*
     * Task classes. Coded in a regular but ugly format/style to
     * simplify checks that each variant differs in the right way from
     * others. The null screenings exist because compilers cannot tell
     * that we've already null-checked task arguments, so we force
     * simplest hoisted bypass to help avoid convoluted traps.
     */
    @SuppressWarnings("serial")
    static final class ForEachKeyTask<K,V>
        extends BulkTask<K,V,Void> {
        final Action<? super K> action;
        ForEachKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Action<? super K> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Action<? super K> action;
            if ((action = this.action) != null) {
                // Iteratively split: fork the upper half [h, f) and keep
                // [i, h) for this task (baseLimit = h shrinks our range).
                // batch halves on each split, bounding the number of forks
                // without recursive compute calls.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);   // account for the forked child
                    new ForEachKeyTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process our remaining range, then complete.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.apply(p.key);
                propagateCompletion();
            }
        }
    }
4654    
    @SuppressWarnings("serial")
    static final class ForEachValueTask<K,V>
        extends BulkTask<K,V,Void> {
        final Action<? super V> action;
        ForEachValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Action<? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Action<? super V> action;
            if ((action = this.action) != null) {
                // Fork upper halves while the batch budget lasts; keep the
                // lower half for this task (baseLimit = h as a side effect).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);   // account for the forked child
                    new ForEachValueTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Apply the action to each value in our remaining range.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.apply(p.val);
                propagateCompletion();
            }
        }
    }
4681    
    @SuppressWarnings("serial")
    static final class ForEachEntryTask<K,V>
        extends BulkTask<K,V,Void> {
        final Action<? super Entry<K,V>> action;
        ForEachEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Action<? super Entry<K,V>> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Action<? super Entry<K,V>> action;
            if ((action = this.action) != null) {
                // Fork upper halves while the batch budget lasts; keep the
                // lower half for this task (baseLimit = h as a side effect).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);   // account for the forked child
                    new ForEachEntryTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Nodes themselves are Map.Entry implementations.
                for (Node<K,V> p; (p = advance()) != null; )
                    action.apply(p);
                propagateCompletion();
            }
        }
    }
4708    
    @SuppressWarnings("serial")
    static final class ForEachMappingTask<K,V>
        extends BulkTask<K,V,Void> {
        final BiAction<? super K, ? super V> action;
        ForEachMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiAction<? super K,? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final BiAction<? super K, ? super V> action;
            if ((action = this.action) != null) {
                // Fork upper halves while the batch budget lasts; keep the
                // lower half for this task (baseLimit = h as a side effect).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);   // account for the forked child
                    new ForEachMappingTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Apply the action to each (key, value) pair in our range.
                for (Node<K,V> p; (p = advance()) != null; )
                    action.apply(p.key, p.val);
                propagateCompletion();
            }
        }
    }
4735    
    @SuppressWarnings("serial")
    static final class ForEachTransformedKeyTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Fun<? super K, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super K, ? extends U> transformer, Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Fun<? super K, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork upper halves while the batch budget lasts; keep the
                // lower half for this task (baseLimit = h as a side effect).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);   // account for the forked child
                    new ForEachTransformedKeyTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // A null transformation means "skip this element".
                    if ((u = transformer.apply(p.key)) != null)
                        action.apply(u);
                }
                propagateCompletion();
            }
        }
    }
4768    
    @SuppressWarnings("serial")
    static final class ForEachTransformedValueTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Fun<? super V, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super V, ? extends U> transformer, Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Fun<? super V, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork upper halves while the batch budget lasts; keep the
                // lower half for this task (baseLimit = h as a side effect).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);   // account for the forked child
                    new ForEachTransformedValueTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // A null transformation means "skip this element".
                    if ((u = transformer.apply(p.val)) != null)
                        action.apply(u);
                }
                propagateCompletion();
            }
        }
    }
4801    
    @SuppressWarnings("serial")
    static final class ForEachTransformedEntryTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Fun<Map.Entry<K,V>, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<Map.Entry<K,V>, ? extends U> transformer, Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Fun<Map.Entry<K,V>, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork upper halves while the batch budget lasts; keep the
                // lower half for this task (baseLimit = h as a side effect).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);   // account for the forked child
                    new ForEachTransformedEntryTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // A null transformation means "skip this element".
                    if ((u = transformer.apply(p)) != null)
                        action.apply(u);
                }
                propagateCompletion();
            }
        }
    }
4834    
    @SuppressWarnings("serial")
    static final class ForEachTransformedMappingTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final BiFun<? super K, ? super V, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFun<? super K, ? super V, ? extends U> transformer,
             Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final BiFun<? super K, ? super V, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork upper halves while the batch budget lasts; keep the
                // lower half for this task (baseLimit = h as a side effect).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);   // account for the forked child
                    new ForEachTransformedMappingTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // A null transformation means "skip this element".
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        action.apply(u);
                }
                propagateCompletion();
            }
        }
    }
4868    
    /**
     * Task searching keys: the first non-null result of
     * {@code searchFunction} is published via the shared
     * AtomicReference, which also makes all related tasks stop early.
     */
    @SuppressWarnings("serial")
    static final class SearchKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super K, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super K, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Fun<? super K, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork the right half [h, f) of the remaining range,
                // bailing out as soon as any task has found a result.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchKeysTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan own portion, re-checking the shared result each
                // step so the search stops once anyone has a match.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key)) != null) {
                        // First successful CAS completes the whole job.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
4912    
4913 dl 1.102 @SuppressWarnings("serial")
4914     static final class SearchValuesTask<K,V,U>
4915     extends BulkTask<K,V,U> {
4916 dl 1.52 final Fun<? super V, ? extends U> searchFunction;
4917     final AtomicReference<U> result;
4918     SearchValuesTask
4919 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4920 dl 1.52 Fun<? super V, ? extends U> searchFunction,
4921     AtomicReference<U> result) {
4922 dl 1.102 super(p, b, i, f, t);
4923 dl 1.52 this.searchFunction = searchFunction; this.result = result;
4924     }
4925 dl 1.79 public final U getRawResult() { return result.get(); }
4926 dl 1.102 public final void compute() {
4927 dl 1.79 final Fun<? super V, ? extends U> searchFunction;
4928     final AtomicReference<U> result;
4929 dl 1.82 if ((searchFunction = this.searchFunction) != null &&
4930     (result = this.result) != null) {
4931 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4932     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4933 dl 1.82 if (result.get() != null)
4934     return;
4935 dl 1.102 addToPendingCount(1);
4936 dl 1.82 new SearchValuesTask<K,V,U>
4937 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
4938     searchFunction, result).fork();
4939 dl 1.61 }
4940 dl 1.82 while (result.get() == null) {
4941 dl 1.102 U u;
4942     Node<K,V> p;
4943     if ((p = advance()) == null) {
4944 dl 1.82 propagateCompletion();
4945     break;
4946     }
4947 dl 1.102 if ((u = searchFunction.apply(p.val)) != null) {
4948 dl 1.82 if (result.compareAndSet(null, u))
4949     quietlyCompleteRoot();
4950     break;
4951     }
4952 dl 1.52 }
4953     }
4954     }
4955     }
4956    
    /**
     * Task searching entries: the first non-null result of
     * {@code searchFunction} is published via the shared
     * AtomicReference, which also makes all related tasks stop early.
     */
    @SuppressWarnings("serial")
    static final class SearchEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<Entry<K,V>, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Fun<Entry<K,V>, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork the right half [h, f) of the remaining range,
                // bailing out as soon as any task has found a result.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchEntriesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan own portion, re-checking the shared result each
                // step so the search stops once anyone has a match.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        // NOTE(review): sibling search tasks use "break"
                        // here; "return" appears equivalent since nothing
                        // follows the loop — confirm before unifying.
                        return;
                    }
                }
            }
        }
    }
5000    
    /**
     * Task searching (key, value) pairs: the first non-null result of
     * {@code searchFunction} is published via the shared
     * AtomicReference, which also makes all related tasks stop early.
     */
    @SuppressWarnings("serial")
    static final class SearchMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFun<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFun<? super K, ? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final BiFun<? super K, ? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork the right half [h, f) of the remaining range,
                // bailing out as soon as any task has found a result.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchMappingsTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan own portion, re-checking the shared result each
                // step so the search stops once anyone has a match.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key, p.val)) != null) {
                        // First successful CAS completes the whole job.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5044    
    /**
     * Task reducing all keys with the given reducer.  Forked right-hand
     * subtasks are chained through {@code rights}/{@code nextRight} so
     * their results can be folded in during completion processing.
     */
    @SuppressWarnings("serial")
    static final class ReduceKeysTask<K,V>
        extends BulkTask<K,V,K> {
        final BiFun<? super K, ? super K, ? extends K> reducer;
        K result;
        ReduceKeysTask<K,V> rights, nextRight;
        ReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceKeysTask<K,V> nextRight,
             BiFun<? super K, ? super K, ? extends K> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final K getRawResult() { return result; }
        public final void compute() {
            final BiFun<? super K, ? super K, ? extends K> reducer;
            if ((reducer = this.reducer) != null) {
                // Fork the right half [h, f); this task keeps [i, h).
                // Each fork is pushed onto the "rights" chain for the
                // merge phase below.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceKeysTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce this task's own portion; null seeds/elements
                // are skipped rather than passed to the reducer.
                K r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    K u = p.key;
                    r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
                }
                result = r;
                // Completion pass: at each task completing along this
                // path, fold its forked subtasks' results into its own.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") ReduceKeysTask<K,V>
                        t = (ReduceKeysTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        K tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight; // unlink as we fold
                    }
                }
            }
        }
    }
5091    
5092 dl 1.102 @SuppressWarnings("serial")
5093     static final class ReduceValuesTask<K,V>
5094     extends BulkTask<K,V,V> {
5095 dl 1.52 final BiFun<? super V, ? super V, ? extends V> reducer;
5096     V result;
5097 dl 1.61 ReduceValuesTask<K,V> rights, nextRight;
5098 dl 1.52 ReduceValuesTask
5099 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5100 dl 1.61 ReduceValuesTask<K,V> nextRight,
5101 dl 1.52 BiFun<? super V, ? super V, ? extends V> reducer) {
5102 dl 1.102 super(p, b, i, f, t); this.nextRight = nextRight;
5103 dl 1.52 this.reducer = reducer;
5104     }
5105 dl 1.79 public final V getRawResult() { return result; }
5106 dl 1.102 public final void compute() {
5107 dl 1.82 final BiFun<? super V, ? super V, ? extends V> reducer;
5108     if ((reducer = this.reducer) != null) {
5109 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
5110     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5111     addToPendingCount(1);
5112 dl 1.82 (rights = new ReduceValuesTask<K,V>
5113 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
5114     rights, reducer)).fork();
5115     }
5116 dl 1.82 V r = null;
5117 dl 1.102 for (Node<K,V> p; (p = advance()) != null; ) {
5118     V v = p.val;
5119     r = (r == null) ? v : reducer.apply(r, v);
5120 dl 1.82 }
5121     result = r;
5122     CountedCompleter<?> c;
5123     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5124 dl 1.102 @SuppressWarnings("unchecked") ReduceValuesTask<K,V>
5125 dl 1.82 t = (ReduceValuesTask<K,V>)c,
5126     s = t.rights;
5127     while (s != null) {
5128     V tr, sr;
5129     if ((sr = s.result) != null)
5130     t.result = (((tr = t.result) == null) ? sr :
5131     reducer.apply(tr, sr));
5132     s = t.rights = s.nextRight;
5133     }
5134 dl 1.52 }
5135     }
5136     }
5137     }
5138    
    /**
     * Task reducing all entries with the given reducer.  Forked
     * right-hand subtasks are chained through
     * {@code rights}/{@code nextRight} so their results can be folded
     * in during completion processing.
     */
    @SuppressWarnings("serial")
    static final class ReduceEntriesTask<K,V>
        extends BulkTask<K,V,Map.Entry<K,V>> {
        final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
        Map.Entry<K,V> result;
        ReduceEntriesTask<K,V> rights, nextRight;
        ReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceEntriesTask<K,V> nextRight,
             BiFun<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final Map.Entry<K,V> getRawResult() { return result; }
        public final void compute() {
            final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
            if ((reducer = this.reducer) != null) {
                // Fork the right half [h, f); this task keeps [i, h).
                // Each fork goes onto the "rights" chain for merging.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceEntriesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce this task's own portion (nodes serve as entries).
                Map.Entry<K,V> r = null;
                for (Node<K,V> p; (p = advance()) != null; )
                    r = (r == null) ? p : reducer.apply(r, p);
                result = r;
                // Completion pass: fold forked subtasks' results into
                // each completing task, unlinking the chain as we go.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") ReduceEntriesTask<K,V>
                        t = (ReduceEntriesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        Map.Entry<K,V> tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5183    
    /**
     * Task transforming each key and reducing the non-null transformed
     * values.  Forked right-hand subtasks are chained through
     * {@code rights}/{@code nextRight} for the completion-time merge.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super K, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceKeysTask<K,V,U> rights, nextRight;
        MapReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysTask<K,V,U> nextRight,
             Fun<? super K, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Fun<? super K, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Fork the right half [h, f); this task keeps [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Map/reduce own portion; null transformed values are
                // simply skipped.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Completion pass: fold forked subtasks' results into
                // each completing task, unlinking the chain as we go.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysTask<K,V,U>
                        t = (MapReduceKeysTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5236    
    /**
     * Task transforming each value and reducing the non-null
     * transformed values.  Forked right-hand subtasks are chained
     * through {@code rights}/{@code nextRight} for the merge phase.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super V, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceValuesTask<K,V,U> rights, nextRight;
        MapReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesTask<K,V,U> nextRight,
             Fun<? super V, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Fun<? super V, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Fork the right half [h, f); this task keeps [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Map/reduce own portion; null transformed values are
                // simply skipped.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Completion pass: fold forked subtasks' results into
                // each completing task, unlinking the chain as we go.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesTask<K,V,U>
                        t = (MapReduceValuesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5289    
    /**
     * Task transforming each entry and reducing the non-null
     * transformed values.  Forked right-hand subtasks are chained
     * through {@code rights}/{@code nextRight} for the merge phase.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<Map.Entry<K,V>, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceEntriesTask<K,V,U> rights, nextRight;
        MapReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesTask<K,V,U> nextRight,
             Fun<Map.Entry<K,V>, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Fun<Map.Entry<K,V>, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Fork the right half [h, f); this task keeps [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Map/reduce own portion; null transformed values are
                // simply skipped.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Completion pass: fold forked subtasks' results into
                // each completing task, unlinking the chain as we go.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesTask<K,V,U>
                        t = (MapReduceEntriesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5342    
    /**
     * Task transforming each (key, value) pair and reducing the
     * non-null transformed values.  Forked right-hand subtasks are
     * chained through {@code rights}/{@code nextRight} for the merge.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFun<? super K, ? super V, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceMappingsTask<K,V,U> rights, nextRight;
        MapReduceMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsTask<K,V,U> nextRight,
             BiFun<? super K, ? super V, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final BiFun<? super K, ? super V, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Fork the right half [h, f); this task keeps [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Map/reduce own portion; null transformed values are
                // simply skipped.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Completion pass: fold forked subtasks' results into
                // each completing task, unlinking the chain as we go.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsTask<K,V,U>
                        t = (MapReduceMappingsTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5395    
    /**
     * Task mapping each key to a double and reducing with the given
     * reducer, seeded with {@code basis}.  Forked right-hand subtasks
     * are chained through {@code rights}/{@code nextRight} for merging.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<? super K> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;
        double result;
        MapReduceKeysToDoubleTask<K,V> rights, nextRight;
        MapReduceKeysToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToDoubleTask<K,V> nextRight,
             ObjectToDouble<? super K> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectToDouble<? super K> transformer;
            final DoubleByDoubleToDouble reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Fork the right half [h, f); each fork inherits the
                // same basis r (unchanged until the scan below).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Accumulate over this task's own portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key));
                result = r;
                // Completion pass: fold forked subtasks' results into
                // each completing task, unlinking the chain as we go.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysToDoubleTask<K,V>
                        t = (MapReduceKeysToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5444    
    /**
     * Task mapping each value to a double and reducing with the given
     * reducer, seeded with {@code basis}.  Forked right-hand subtasks
     * are chained through {@code rights}/{@code nextRight} for merging.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<? super V> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;
        double result;
        MapReduceValuesToDoubleTask<K,V> rights, nextRight;
        MapReduceValuesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToDoubleTask<K,V> nextRight,
             ObjectToDouble<? super V> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectToDouble<? super V> transformer;
            final DoubleByDoubleToDouble reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Fork the right half [h, f); each fork inherits the
                // same basis r (unchanged until the scan below).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Accumulate over this task's own portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.val));
                result = r;
                // Completion pass: fold forked subtasks' results into
                // each completing task, unlinking the chain as we go.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesToDoubleTask<K,V>
                        t = (MapReduceValuesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5493    
    /**
     * Task mapping each entry to a double and reducing with the given
     * reducer, seeded with {@code basis}.  Forked right-hand subtasks
     * are chained through {@code rights}/{@code nextRight} for merging.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<Map.Entry<K,V>> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;
        double result;
        MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
        MapReduceEntriesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToDoubleTask<K,V> nextRight,
             ObjectToDouble<Map.Entry<K,V>> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectToDouble<Map.Entry<K,V>> transformer;
            final DoubleByDoubleToDouble reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Fork the right half [h, f); each fork inherits the
                // same basis r (unchanged until the scan below).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Accumulate over this task's own portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p));
                result = r;
                // Completion pass: fold forked subtasks' results into
                // each completing task, unlinking the chain as we go.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesToDoubleTask<K,V>
                        t = (MapReduceEntriesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5542    
    /**
     * Task mapping each (key, value) pair to a double and reducing with
     * the given reducer, seeded with {@code basis}.  Forked right-hand
     * subtasks are chained through {@code rights}/{@code nextRight}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectByObjectToDouble<? super K, ? super V> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;
        double result;
        MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
        MapReduceMappingsToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToDoubleTask<K,V> nextRight,
             ObjectByObjectToDouble<? super K, ? super V> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectByObjectToDouble<? super K, ? super V> transformer;
            final DoubleByDoubleToDouble reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Fork the right half [h, f); each fork inherits the
                // same basis r (unchanged until the scan below).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Accumulate over this task's own portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key, p.val));
                result = r;
                // Completion pass: fold forked subtasks' results into
                // each completing task, unlinking the chain as we go.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsToDoubleTask<K,V>
                        t = (MapReduceMappingsToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5591    
    /**
     * Task mapping each key to a long and reducing with the given
     * reducer, seeded with {@code basis}.  Forked right-hand subtasks
     * are chained through {@code rights}/{@code nextRight} for merging.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<? super K> transformer;
        final LongByLongToLong reducer;
        final long basis;
        long result;
        MapReduceKeysToLongTask<K,V> rights, nextRight;
        MapReduceKeysToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToLongTask<K,V> nextRight,
             ObjectToLong<? super K> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectToLong<? super K> transformer;
            final LongByLongToLong reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Fork the right half [h, f); each fork inherits the
                // same basis r (unchanged until the scan below).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Accumulate over this task's own portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key));
                result = r;
                // Completion pass: fold forked subtasks' results into
                // each completing task, unlinking the chain as we go.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysToLongTask<K,V>
                        t = (MapReduceKeysToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5640    
    /**
     * Task mapping each value to a long and reducing with the given
     * reducer, seeded with {@code basis}.  Forked right-hand subtasks
     * are chained through {@code rights}/{@code nextRight} for merging.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<? super V> transformer;
        final LongByLongToLong reducer;
        final long basis;
        long result;
        MapReduceValuesToLongTask<K,V> rights, nextRight;
        MapReduceValuesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToLongTask<K,V> nextRight,
             ObjectToLong<? super V> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectToLong<? super V> transformer;
            final LongByLongToLong reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Fork the right half [h, f); each fork inherits the
                // same basis r (unchanged until the scan below).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Accumulate over this task's own portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.val));
                result = r;
                // Completion pass: fold forked subtasks' results into
                // each completing task, unlinking the chain as we go.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesToLongTask<K,V>
                        t = (MapReduceValuesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5689    
    /**
     * Task that maps each entry node to a long via {@code transformer}
     * and combines the results with {@code reducer}, starting from
     * {@code basis}. Splits into forked "rights" subtasks and merges
     * their results through the CountedCompleter completion chain.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<Map.Entry<K,V>> transformer; // maps entry to long
        final LongByLongToLong reducer;                 // combines two longs
        final long basis;                               // reduction identity
        long result;                                    // subtask's reduction
        // rights: forked upper-half subtasks; nextRight: link in parent's list.
        MapReduceEntriesToLongTask<K,V> rights, nextRight;
        MapReduceEntriesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToLongTask<K,V> nextRight,
             ObjectToLong<Map.Entry<K,V>> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectToLong<Map.Entry<K,V>> transformer;
            final LongByLongToLong reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Fork subtasks for upper halves while split budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Map/reduce nodes remaining in this task's own sub-range
                // (the Node itself serves as the Map.Entry).
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p));
                result = r;
                // Iteratively fold completed subtask results up the chain.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesToLongTask<K,V>
                        t = (MapReduceEntriesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5738    
    /**
     * Task that maps each (key, value) pair to a long via
     * {@code transformer} and combines the results with {@code reducer},
     * starting from {@code basis}. Splits into forked "rights" subtasks
     * and merges their results through the completion chain.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectByObjectToLong<? super K, ? super V> transformer;
        final LongByLongToLong reducer;  // combines two longs
        final long basis;                // reduction identity
        long result;                     // this subtask's reduction
        // rights: forked upper-half subtasks; nextRight: link in parent's list.
        MapReduceMappingsToLongTask<K,V> rights, nextRight;
        MapReduceMappingsToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToLongTask<K,V> nextRight,
             ObjectByObjectToLong<? super K, ? super V> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectByObjectToLong<? super K, ? super V> transformer;
            final LongByLongToLong reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Fork subtasks for upper halves while split budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Map/reduce pairs remaining in this task's own sub-range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key, p.val));
                result = r;
                // Iteratively fold completed subtask results up the chain.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsToLongTask<K,V>
                        t = (MapReduceMappingsToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5787    
    /**
     * Task that maps each key to an int via {@code transformer} and
     * combines the results with {@code reducer}, starting from
     * {@code basis}. Splits into forked "rights" subtasks and merges
     * their results through the CountedCompleter completion chain.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<? super K> transformer; // maps a key to an int
        final IntByIntToInt reducer;              // combines two ints
        final int basis;                          // reduction identity
        int result;                               // this subtask's reduction
        // rights: forked upper-half subtasks; nextRight: link in parent's list.
        MapReduceKeysToIntTask<K,V> rights, nextRight;
        MapReduceKeysToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToIntTask<K,V> nextRight,
             ObjectToInt<? super K> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectToInt<? super K> transformer;
            final IntByIntToInt reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Fork subtasks for upper halves while split budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Map/reduce keys remaining in this task's own sub-range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key));
                result = r;
                // Iteratively fold completed subtask results up the chain.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysToIntTask<K,V>
                        t = (MapReduceKeysToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5836    
    /**
     * Task that maps each value to an int via {@code transformer} and
     * combines the results with {@code reducer}, starting from
     * {@code basis}. Splits into forked "rights" subtasks and merges
     * their results through the CountedCompleter completion chain.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<? super V> transformer; // maps a value to an int
        final IntByIntToInt reducer;              // combines two ints
        final int basis;                          // reduction identity
        int result;                               // this subtask's reduction
        // rights: forked upper-half subtasks; nextRight: link in parent's list.
        MapReduceValuesToIntTask<K,V> rights, nextRight;
        MapReduceValuesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToIntTask<K,V> nextRight,
             ObjectToInt<? super V> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectToInt<? super V> transformer;
            final IntByIntToInt reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Fork subtasks for upper halves while split budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Map/reduce values remaining in this task's own sub-range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.val));
                result = r;
                // Iteratively fold completed subtask results up the chain.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesToIntTask<K,V>
                        t = (MapReduceValuesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5885    
    /**
     * Task that maps each entry node to an int via {@code transformer}
     * and combines the results with {@code reducer}, starting from
     * {@code basis}. Splits into forked "rights" subtasks and merges
     * their results through the CountedCompleter completion chain.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<Map.Entry<K,V>> transformer; // maps entry to int
        final IntByIntToInt reducer;                   // combines two ints
        final int basis;                               // reduction identity
        int result;                                    // subtask's reduction
        // rights: forked upper-half subtasks; nextRight: link in parent's list.
        MapReduceEntriesToIntTask<K,V> rights, nextRight;
        MapReduceEntriesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToIntTask<K,V> nextRight,
             ObjectToInt<Map.Entry<K,V>> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectToInt<Map.Entry<K,V>> transformer;
            final IntByIntToInt reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Fork subtasks for upper halves while split budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Map/reduce nodes remaining in this task's own sub-range
                // (the Node itself serves as the Map.Entry).
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p));
                result = r;
                // Iteratively fold completed subtask results up the chain.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesToIntTask<K,V>
                        t = (MapReduceEntriesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5934    
    /**
     * Task that maps each (key, value) pair to an int via
     * {@code transformer} and combines the results with {@code reducer},
     * starting from {@code basis}. Splits into forked "rights" subtasks
     * and merges their results through the completion chain.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectByObjectToInt<? super K, ? super V> transformer;
        final IntByIntToInt reducer;  // combines two ints
        final int basis;              // reduction identity
        int result;                   // this subtask's reduction
        // rights: forked upper-half subtasks; nextRight: link in parent's list.
        MapReduceMappingsToIntTask<K,V> rights, nextRight;
        MapReduceMappingsToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToIntTask<K,V> nextRight,
             ObjectByObjectToInt<? super K, ? super V> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectByObjectToInt<? super K, ? super V> transformer;
            final IntByIntToInt reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Fork subtasks for upper halves while split budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Map/reduce pairs remaining in this task's own sub-range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key, p.val));
                result = r;
                // Iteratively fold completed subtask results up the chain.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsToIntTask<K,V>
                        t = (MapReduceMappingsToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5983    
5984 dl 1.102 /* ---------------- Counters -------------- */
5985    
5986     // Adapted from LongAdder and Striped64.
5987     // See their internal docs for explanation.
5988    
    // A padded cell for distributing counts
    static final class CounterCell {
        // p0..p6 / q0..q6 pad value on both sides; per the Striped64
        // adaptation note above, this keeps cells in distinct cache
        // lines to mitigate false sharing between striped counters.
        volatile long p0, p1, p2, p3, p4, p5, p6;
        volatile long value; // the cell's contribution to the total count
        volatile long q0, q1, q2, q3, q4, q5, q6;
        CounterCell(long x) { value = x; }
    }
5996    
    /**
     * Holder for the thread-local hash code determining which
     * CounterCell to use. The code is initialized via the
     * counterHashCodeGenerator, but may be moved upon collisions.
     */
    static final class CounterHashCode {
        // Mutable probe value; rehashed in fullAddCount on contention.
        int code;
    }
6005    
    /**
     * Generates initial value for per-thread CounterHashCodes.
     */
    static final AtomicInteger counterHashCodeGenerator = new AtomicInteger();

    /**
     * Increment for counterHashCodeGenerator. See class ThreadLocal
     * for explanation. (Same spreading constant ThreadLocal uses.)
     */
    static final int SEED_INCREMENT = 0x61c88647;

    /**
     * Per-thread counter hash codes. Shared across all instances.
     * Entries are created lazily by fullAddCount.
     */
    static final ThreadLocal<CounterHashCode> threadCounterHashCode =
        new ThreadLocal<CounterHashCode>();
6022    
6023    
6024     final long sumCount() {
6025     CounterCell[] as = counterCells; CounterCell a;
6026     long sum = baseCount;
6027     if (as != null) {
6028     for (int i = 0; i < as.length; ++i) {
6029     if ((a = as[i]) != null)
6030     sum += a.value;
6031     }
6032     }
6033     return sum;
6034     }
6035    
    // See LongAdder version for explanation
    /**
     * Contended/slow path for counter updates: loops until {@code x} is
     * added to some CounterCell (creating the table, attaching a new
     * cell, rehashing away from a contended slot, or doubling the table
     * up to NCPU cells), or falls back to a CAS on baseCount.
     * cellsBusy serves as a CAS-acquired spinlock guarding all
     * structural changes to the cell table.
     *
     * @param x the value to add
     * @param hc this thread's hash-code holder, or null to create one
     * @param wasUncontended false if a CAS on the hashed cell is already
     *        known to have failed, forcing a rehash before retrying
     */
    private final void fullAddCount(long x, CounterHashCode hc,
                                    boolean wasUncontended) {
        int h;
        if (hc == null) {
            // First contended update by this thread: install its probe.
            hc = new CounterHashCode();
            int s = counterHashCodeGenerator.addAndGet(SEED_INCREMENT);
            h = hc.code = (s == 0) ? 1 : s; // Avoid zero
            threadCounterHashCode.set(hc);
        }
        else
            h = hc.code;
        boolean collide = false;                // True if last slot nonempty
        for (;;) {
            CounterCell[] as; CounterCell a; int n; long v;
            if ((as = counterCells) != null && (n = as.length) > 0) {
                if ((a = as[(n - 1) & h]) == null) {
                    if (cellsBusy == 0) {            // Try to attach new Cell
                        CounterCell r = new CounterCell(x); // Optimistic create
                        if (cellsBusy == 0 &&
                            U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                            boolean created = false;
                            try {               // Recheck under lock
                                CounterCell[] rs; int m, j;
                                if ((rs = counterCells) != null &&
                                    (m = rs.length) > 0 &&
                                    rs[j = (m - 1) & h] == null) {
                                    rs[j] = r;
                                    created = true;
                                }
                            } finally {
                                cellsBusy = 0;
                            }
                            if (created)
                                break;
                            continue;           // Slot is now non-empty
                        }
                    }
                    collide = false;
                }
                else if (!wasUncontended)       // CAS already known to fail
                    wasUncontended = true;      // Continue after rehash
                else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
                    break;
                else if (counterCells != as || n >= NCPU)
                    collide = false;            // At max size or stale
                else if (!collide)
                    collide = true;
                else if (cellsBusy == 0 &&
                         U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                    // Repeated collisions at current size: double the table.
                    try {
                        if (counterCells == as) {// Expand table unless stale
                            CounterCell[] rs = new CounterCell[n << 1];
                            for (int i = 0; i < n; ++i)
                                rs[i] = as[i];
                            counterCells = rs;
                        }
                    } finally {
                        cellsBusy = 0;
                    }
                    collide = false;
                    continue;                   // Retry with expanded table
                }
                h ^= h << 13;                   // Rehash (xorshift)
                h ^= h >>> 17;
                h ^= h << 5;
            }
            else if (cellsBusy == 0 && counterCells == as &&
                     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                boolean init = false;
                try {                           // Initialize table
                    if (counterCells == as) {
                        CounterCell[] rs = new CounterCell[2];
                        rs[h & 1] = new CounterCell(x);
                        counterCells = rs;
                        init = true;
                    }
                } finally {
                    cellsBusy = 0;
                }
                if (init)
                    break;
            }
            else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
                break;                          // Fall back on using base
        }
        hc.code = h;                            // Record index for next time
    }
6124    
    // Unsafe mechanics
    private static final sun.misc.Unsafe U;          // Unsafe instance for CAS/offsets
    private static final long SIZECTL;               // offset of sizeCtl field
    private static final long TRANSFERINDEX;         // offset of transferIndex field
    private static final long TRANSFERORIGIN;        // offset of transferOrigin field
    private static final long BASECOUNT;             // offset of baseCount field
    private static final long CELLSBUSY;             // offset of cellsBusy spinlock field
    private static final long CELLVALUE;             // offset of CounterCell.value
    private static final long ABASE;                 // base offset of Node[] storage
    private static final int ASHIFT;                 // log2 of Node[] element scale
6135    
    // Caches Unsafe field offsets and array geometry once at class load;
    // any reflection failure is fatal (the class cannot operate without them).
    static {
        try {
            U = getUnsafe();
            Class<?> k = ConcurrentHashMapV8.class;
            SIZECTL = U.objectFieldOffset
                (k.getDeclaredField("sizeCtl"));
            TRANSFERINDEX = U.objectFieldOffset
                (k.getDeclaredField("transferIndex"));
            TRANSFERORIGIN = U.objectFieldOffset
                (k.getDeclaredField("transferOrigin"));
            BASECOUNT = U.objectFieldOffset
                (k.getDeclaredField("baseCount"));
            CELLSBUSY = U.objectFieldOffset
                (k.getDeclaredField("cellsBusy"));
            Class<?> ck = CounterCell.class;
            CELLVALUE = U.objectFieldOffset
                (ck.getDeclaredField("value"));
            Class<?> ak = Node[].class;
            ABASE = U.arrayBaseOffset(ak);
            int scale = U.arrayIndexScale(ak);
            // ASHIFT lets element addresses be computed by shift instead
            // of multiply; requires the scale to be a power of two.
            if ((scale & (scale - 1)) != 0)
                throw new Error("data type scale not a power of two");
            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
        } catch (Exception e) {
            throw new Error(e);
        }
    }
6163    
6164     /**
6165     * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
6166     * Replace with a simple call to Unsafe.getUnsafe when integrating
6167     * into a jdk.
6168     *
6169     * @return a sun.misc.Unsafe
6170     */
6171     private static sun.misc.Unsafe getUnsafe() {
6172     try {
6173     return sun.misc.Unsafe.getUnsafe();
6174 jsr166 1.87 } catch (SecurityException tryReflectionInstead) {}
6175     try {
6176     return java.security.AccessController.doPrivileged
6177     (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
6178     public sun.misc.Unsafe run() throws Exception {
6179     Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
6180     for (java.lang.reflect.Field f : k.getDeclaredFields()) {
6181     f.setAccessible(true);
6182     Object x = f.get(null);
6183     if (k.isInstance(x))
6184     return k.cast(x);
6185     }
6186     throw new NoSuchFieldError("the Unsafe");
6187     }});
6188     } catch (java.security.PrivilegedActionException e) {
6189     throw new RuntimeException("Could not initialize intrinsics",
6190     e.getCause());
6191 dl 1.52 }
6192     }
6193 dl 1.1 }