ViewVC Help
View File | Revision Log | Show Annotations | Download File | Root Listing
root/jsr166/jsr166/src/jsr166e/ConcurrentHashMapV8.java
Revision: 1.104
Committed: Wed Jun 19 17:00:58 2013 UTC (10 years, 10 months ago) by jsr166
Branch: MAIN
Changes since 1.103: +4 -4 lines
Log Message:
coding style

File Contents

# User Rev Content
1 dl 1.1 /*
2     * Written by Doug Lea with assistance from members of JCP JSR-166
3     * Expert Group and released to the public domain, as explained at
4     * http://creativecommons.org/publicdomain/zero/1.0/
5     */
6    
7     package jsr166e;
8 dl 1.71
9 dl 1.102 import jsr166e.ForkJoinPool;
10    
11     import java.io.ObjectStreamField;
12     import java.io.Serializable;
13     import java.lang.reflect.ParameterizedType;
14     import java.lang.reflect.Type;
15 dl 1.24 import java.util.Arrays;
16 dl 1.1 import java.util.Collection;
17 dl 1.102 import java.util.Comparator;
18     import java.util.ConcurrentModificationException;
19     import java.util.Enumeration;
20     import java.util.HashMap;
21 dl 1.1 import java.util.Hashtable;
22     import java.util.Iterator;
23 dl 1.102 import java.util.Map;
24 dl 1.1 import java.util.NoSuchElementException;
25 dl 1.102 import java.util.Set;
26 dl 1.1 import java.util.concurrent.ConcurrentMap;
27 dl 1.102 import java.util.concurrent.atomic.AtomicReference;
28 dl 1.82 import java.util.concurrent.atomic.AtomicInteger;
29 dl 1.102 import java.util.concurrent.locks.LockSupport;
30     import java.util.concurrent.locks.ReentrantLock;
31 dl 1.79
32 dl 1.1 /**
33     * A hash table supporting full concurrency of retrievals and
34     * high expected concurrency for updates. This class obeys the
35     * same functional specification as {@link java.util.Hashtable}, and
36     * includes versions of methods corresponding to each method of
37     * {@code Hashtable}. However, even though all operations are
38     * thread-safe, retrieval operations do <em>not</em> entail locking,
39     * and there is <em>not</em> any support for locking the entire table
40     * in a way that prevents all access. This class is fully
41     * interoperable with {@code Hashtable} in programs that rely on its
42     * thread safety but not on its synchronization details.
43     *
44 jsr166 1.78 * <p>Retrieval operations (including {@code get}) generally do not
45 dl 1.1 * block, so may overlap with update operations (including {@code put}
46     * and {@code remove}). Retrievals reflect the results of the most
47     * recently <em>completed</em> update operations holding upon their
48 dl 1.59 * onset. (More formally, an update operation for a given key bears a
49     * <em>happens-before</em> relation with any (non-null) retrieval for
50     * that key reporting the updated value.) For aggregate operations
51     * such as {@code putAll} and {@code clear}, concurrent retrievals may
52     * reflect insertion or removal of only some entries. Similarly,
53     * Iterators and Enumerations return elements reflecting the state of
54     * the hash table at some point at or since the creation of the
55     * iterator/enumeration. They do <em>not</em> throw {@link
56     * ConcurrentModificationException}. However, iterators are designed
57     * to be used by only one thread at a time. Bear in mind that the
58     * results of aggregate status methods including {@code size}, {@code
59     * isEmpty}, and {@code containsValue} are typically useful only when
60     * a map is not undergoing concurrent updates in other threads.
61     * Otherwise the results of these methods reflect transient states
62     * that may be adequate for monitoring or estimation purposes, but not
63     * for program control.
64 dl 1.1 *
65 jsr166 1.78 * <p>The table is dynamically expanded when there are too many
66 dl 1.16 * collisions (i.e., keys that have distinct hash codes but fall into
67     * the same slot modulo the table size), with the expected average
68 dl 1.24 * effect of maintaining roughly two bins per mapping (corresponding
69     * to a 0.75 load factor threshold for resizing). There may be much
70     * variance around this average as mappings are added and removed, but
71     * overall, this maintains a commonly accepted time/space tradeoff for
72     * hash tables. However, resizing this or any other kind of hash
73     * table may be a relatively slow operation. When possible, it is a
74     * good idea to provide a size estimate as an optional {@code
75 dl 1.16 * initialCapacity} constructor argument. An additional optional
76     * {@code loadFactor} constructor argument provides a further means of
77     * customizing initial table capacity by specifying the table density
78     * to be used in calculating the amount of space to allocate for the
79     * given number of elements. Also, for compatibility with previous
80     * versions of this class, constructors may optionally specify an
81     * expected {@code concurrencyLevel} as an additional hint for
82     * internal sizing. Note that using many keys with exactly the same
83 jsr166 1.31 * {@code hashCode()} is a sure way to slow down performance of any
84 dl 1.102 * hash table. To ameliorate impact, when keys are {@link Comparable},
85     * this class may use comparison order among keys to help break ties.
86 dl 1.1 *
87 jsr166 1.78 * <p>A {@link Set} projection of a ConcurrentHashMapV8 may be created
88 dl 1.70 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
89     * (using {@link #keySet(Object)}) when only keys are of interest, and the
90     * mapped values are (perhaps transiently) not used or all take the
91     * same mapping value.
92     *
93 dl 1.1 * <p>This class and its views and iterators implement all of the
94     * <em>optional</em> methods of the {@link Map} and {@link Iterator}
95     * interfaces.
96     *
97 jsr166 1.78 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
98 dl 1.1 * does <em>not</em> allow {@code null} to be used as a key or value.
99     *
100 dl 1.102 * <p>ConcurrentHashMapV8s support a set of sequential and parallel bulk
101     * operations that are designed
102     * to be safely, and often sensibly, applied even with maps that are
103     * being concurrently updated by other threads; for example, when
104     * computing a snapshot summary of the values in a shared registry.
105     * There are three kinds of operation, each with four forms, accepting
106     * functions with Keys, Values, Entries, and (Key, Value) arguments
107     * and/or return values. Because the elements of a ConcurrentHashMapV8
108     * are not ordered in any particular way, and may be processed in
109     * different orders in different parallel executions, the correctness
110     * of supplied functions should not depend on any ordering, or on any
111     * other objects or values that may transiently change while
112     * computation is in progress; and except for forEach actions, should
113     * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry}
114     * objects do not support method {@code setValue}.
115 dl 1.70 *
116     * <ul>
117     * <li> forEach: Perform a given action on each element.
118     * A variant form applies a given transformation on each element
119     * before performing the action.</li>
120     *
121     * <li> search: Return the first available non-null result of
122     * applying a given function on each element; skipping further
123     * search when a result is found.</li>
124     *
125     * <li> reduce: Accumulate each element. The supplied reduction
126     * function cannot rely on ordering (more formally, it should be
127     * both associative and commutative). There are five variants:
128     *
129     * <ul>
130     *
131     * <li> Plain reductions. (There is not a form of this method for
132     * (key, value) function arguments since there is no corresponding
133     * return type.)</li>
134     *
135     * <li> Mapped reductions that accumulate the results of a given
136     * function applied to each element.</li>
137     *
138     * <li> Reductions to scalar doubles, longs, and ints, using a
139     * given basis value.</li>
140     *
141 dl 1.102 * </ul>
142 dl 1.70 * </li>
143     * </ul>
144 dl 1.102 *
145     * <p>These bulk operations accept a {@code parallelismThreshold}
146     * argument. Methods proceed sequentially if the current map size is
147     * estimated to be less than the given threshold. Using a value of
148     * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value
149     * of {@code 1} results in maximal parallelism by partitioning into
150     * enough subtasks to fully utilize the {@link
151     * ForkJoinPool#commonPool()} that is used for all parallel
152     * computations. Normally, you would initially choose one of these
153     * extreme values, and then measure performance of using in-between
154     * values that trade off overhead versus throughput.
155 dl 1.70 *
156     * <p>The concurrency properties of bulk operations follow
157     * from those of ConcurrentHashMapV8: Any non-null result returned
158     * from {@code get(key)} and related access methods bears a
159     * happens-before relation with the associated insertion or
160     * update. The result of any bulk operation reflects the
161     * composition of these per-element relations (but is not
162     * necessarily atomic with respect to the map as a whole unless it
163     * is somehow known to be quiescent). Conversely, because keys
164     * and values in the map are never null, null serves as a reliable
165     * atomic indicator of the current lack of any result. To
166     * maintain this property, null serves as an implicit basis for
167     * all non-scalar reduction operations. For the double, long, and
168     * int versions, the basis should be one that, when combined with
169     * any other value, returns that other value (more formally, it
170     * should be the identity element for the reduction). Most common
171     * reductions have these properties; for example, computing a sum
172     * with basis 0 or a minimum with basis MAX_VALUE.
173     *
174     * <p>Search and transformation functions provided as arguments
175     * should similarly return null to indicate the lack of any result
176     * (in which case it is not used). In the case of mapped
177     * reductions, this also enables transformations to serve as
178     * filters, returning null (or, in the case of primitive
179     * specializations, the identity basis) if the element should not
180     * be combined. You can create compound transformations and
181     * filterings by composing them yourself under this "null means
182     * there is nothing there now" rule before using them in search or
183     * reduce operations.
184     *
185     * <p>Methods accepting and/or returning Entry arguments maintain
186     * key-value associations. They may be useful for example when
187     * finding the key for the greatest value. Note that "plain" Entry
188     * arguments can be supplied using {@code new
189     * AbstractMap.SimpleEntry(k,v)}.
190     *
191 jsr166 1.78 * <p>Bulk operations may complete abruptly, throwing an
192 dl 1.70 * exception encountered in the application of a supplied
193     * function. Bear in mind when handling such exceptions that other
194     * concurrently executing functions could also have thrown
195     * exceptions, or would have done so if the first exception had
196     * not occurred.
197     *
198 dl 1.84 * <p>Speedups for parallel compared to sequential forms are common
199     * but not guaranteed. Parallel operations involving brief functions
200     * on small maps may execute more slowly than sequential forms if the
201     * underlying work to parallelize the computation is more expensive
202     * than the computation itself. Similarly, parallelization may not
203     * lead to much actual parallelism if all processors are busy
204     * performing unrelated tasks.
205 dl 1.70 *
206 jsr166 1.78 * <p>All arguments to all task methods must be non-null.
207 dl 1.70 *
208     * <p><em>jsr166e note: During transition, this class
209     * uses nested functional interfaces with different names but the
210 jsr166 1.77 * same forms as those expected for JDK8.</em>
211 dl 1.70 *
212 dl 1.1 * <p>This class is a member of the
213     * <a href="{@docRoot}/../technotes/guides/collections/index.html">
214     * Java Collections Framework</a>.
215     *
216 jsr166 1.22 * @since 1.5
217 dl 1.1 * @author Doug Lea
218     * @param <K> the type of keys maintained by this map
219     * @param <V> the type of mapped values
220     */
221 jsr166 1.99 public class ConcurrentHashMapV8<K,V>
222     implements ConcurrentMap<K,V>, Serializable {
223 dl 1.1 private static final long serialVersionUID = 7249069246763182397L;
224    
225     /**
226 dl 1.102 * An object for traversing and partitioning elements of a source.
227     * This interface provides a subset of the functionality of JDK8
228     * java.util.Spliterator.
229     */
230     public static interface ConcurrentHashMapSpliterator<T> {
231 jsr166 1.103 /**
232 dl 1.102 * If possible, returns a new spliterator covering
233     * approximately one half of the elements, which will not be
234     * covered by this spliterator. Returns null if cannot be
235     * split.
236     */
237     ConcurrentHashMapSpliterator<T> trySplit();
238     /**
239     * Returns an estimate of the number of elements covered by
240     * this Spliterator.
241     */
242     long estimateSize();
243    
244     /** Applies the action to each untraversed element */
245     void forEachRemaining(Action<? super T> action);
246     /** If an element remains, applies the action and returns true. */
247     boolean tryAdvance(Action<? super T> action);
248 dl 1.41 }
249    
250 dl 1.102 // Sams
251     /** Interface describing a void action of one argument */
252     public interface Action<A> { void apply(A a); }
253     /** Interface describing a void action of two arguments */
254     public interface BiAction<A,B> { void apply(A a, B b); }
255     /** Interface describing a function of one argument */
256     public interface Fun<A,T> { T apply(A a); }
257     /** Interface describing a function of two arguments */
258     public interface BiFun<A,B,T> { T apply(A a, B b); }
259     /** Interface describing a function mapping its argument to a double */
260     public interface ObjectToDouble<A> { double apply(A a); }
261     /** Interface describing a function mapping its argument to a long */
262     public interface ObjectToLong<A> { long apply(A a); }
263     /** Interface describing a function mapping its argument to an int */
264     public interface ObjectToInt<A> {int apply(A a); }
265     /** Interface describing a function mapping two arguments to a double */
266     public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
267     /** Interface describing a function mapping two arguments to a long */
268     public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
269     /** Interface describing a function mapping two arguments to an int */
270     public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); }
271     /** Interface describing a function mapping two doubles to a double */
272     public interface DoubleByDoubleToDouble { double apply(double a, double b); }
273     /** Interface describing a function mapping two longs to a long */
274     public interface LongByLongToLong { long apply(long a, long b); }
275     /** Interface describing a function mapping two ints to an int */
276     public interface IntByIntToInt { int apply(int a, int b); }
277    
278 dl 1.1 /*
279     * Overview:
280     *
281     * The primary design goal of this hash table is to maintain
282     * concurrent readability (typically method get(), but also
283     * iterators and related methods) while minimizing update
284 dl 1.24 * contention. Secondary goals are to keep space consumption about
285     * the same or better than java.util.HashMap, and to support high
286     * initial insertion rates on an empty table by many threads.
287 dl 1.1 *
288 dl 1.102 * This map usually acts as a binned (bucketed) hash table. Each
289     * key-value mapping is held in a Node. Most nodes are instances
290     * of the basic Node class with hash, key, value, and next
291     * fields. However, various subclasses exist: TreeNodes are
292     * arranged in balanced trees, not lists. TreeBins hold the roots
293     * of sets of TreeNodes. ForwardingNodes are placed at the heads
294     * of bins during resizing. ReservationNodes are used as
295     * placeholders while establishing values in computeIfAbsent and
296     * related methods. The types TreeBin, ForwardingNode, and
297     * ReservationNode do not hold normal user keys, values, or
298     * hashes, and are readily distinguishable during search etc
299     * because they have negative hash fields and null key and value
300     * fields. (These special nodes are either uncommon or transient,
301     * so the impact of carrying around some unused fields is
302     * insignificant.)
303 dl 1.1 *
304     * The table is lazily initialized to a power-of-two size upon the
305 dl 1.38 * first insertion. Each bin in the table normally contains a
306     * list of Nodes (most often, the list has only zero or one Node).
307     * Table accesses require volatile/atomic reads, writes, and
308     * CASes. Because there is no other way to arrange this without
309     * adding further indirections, we use intrinsics
310 dl 1.102 * (sun.misc.Unsafe) operations.
311 dl 1.24 *
312 dl 1.82 * We use the top (sign) bit of Node hash fields for control
313     * purposes -- it is available anyway because of addressing
314 dl 1.102 * constraints. Nodes with negative hash fields are specially
315     * handled or ignored in map methods.
316 dl 1.14 *
317 dl 1.27 * Insertion (via put or its variants) of the first node in an
318 dl 1.14 * empty bin is performed by just CASing it to the bin. This is
319 dl 1.38 * by far the most common case for put operations under most
320     * key/hash distributions. Other update operations (insert,
321     * delete, and replace) require locks. We do not want to waste
322     * the space required to associate a distinct lock object with
323     * each bin, so instead use the first node of a bin list itself as
324 dl 1.82 * a lock. Locking support for these locks relies on builtin
325     * "synchronized" monitors.
326 dl 1.24 *
327     * Using the first node of a list as a lock does not by itself
328     * suffice though: When a node is locked, any update must first
329     * validate that it is still the first node after locking it, and
330     * retry if not. Because new nodes are always appended to lists,
331     * once a node is first in a bin, it remains first until deleted
332 dl 1.102 * or the bin becomes invalidated (upon resizing).
333 dl 1.14 *
334 dl 1.24 * The main disadvantage of per-bin locks is that other update
335 dl 1.14 * operations on other nodes in a bin list protected by the same
336     * lock can stall, for example when user equals() or mapping
337 dl 1.38 * functions take a long time. However, statistically, under
338     * random hash codes, this is not a common problem. Ideally, the
339     * frequency of nodes in bins follows a Poisson distribution
340 dl 1.14 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
341 dl 1.16 * parameter of about 0.5 on average, given the resizing threshold
342     * of 0.75, although with a large variance because of resizing
343     * granularity. Ignoring variance, the expected occurrences of
344     * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
345 dl 1.38 * first values are:
346 dl 1.16 *
347 dl 1.38 * 0: 0.60653066
348     * 1: 0.30326533
349     * 2: 0.07581633
350     * 3: 0.01263606
351     * 4: 0.00157952
352     * 5: 0.00015795
353     * 6: 0.00001316
354     * 7: 0.00000094
355     * 8: 0.00000006
356     * more: less than 1 in ten million
357 dl 1.16 *
358     * Lock contention probability for two threads accessing distinct
359 dl 1.38 * elements is roughly 1 / (8 * #elements) under random hashes.
360     *
361     * Actual hash code distributions encountered in practice
362     * sometimes deviate significantly from uniform randomness. This
363     * includes the case when N > (1<<30), so some keys MUST collide.
364     * Similarly for dumb or hostile usages in which multiple keys are
365 dl 1.102 * designed to have identical hash codes or ones that differs only
366     * in masked-out high bits. So we use a secondary strategy that
367     * applies when the number of nodes in a bin exceeds a
368     * threshold. These TreeBins use a balanced tree to hold nodes (a
369     * specialized form of red-black trees), bounding search time to
370     * O(log N). Each search step in a TreeBin is at least twice as
371 dl 1.38 * slow as in a regular list, but given that N cannot exceed
372     * (1<<64) (before running out of addresses) this bounds search
373     * steps, lock hold times, etc, to reasonable constants (roughly
374     * 100 nodes inspected per operation worst case) so long as keys
375     * are Comparable (which is very common -- String, Long, etc).
376     * TreeBin nodes (TreeNodes) also maintain the same "next"
377     * traversal pointers as regular nodes, so can be traversed in
378     * iterators in the same way.
379 dl 1.1 *
380 dl 1.38 * The table is resized when occupancy exceeds a percentage
381 dl 1.82 * threshold (nominally, 0.75, but see below). Any thread
382     * noticing an overfull bin may assist in resizing after the
383     * initiating thread allocates and sets up the replacement
384     * array. However, rather than stalling, these other threads may
385     * proceed with insertions etc. The use of TreeBins shields us
386     * from the worst case effects of overfilling while resizes are in
387     * progress. Resizing proceeds by transferring bins, one by one,
388     * from the table to the next table. To enable concurrency, the
389     * next table must be (incrementally) prefilled with place-holders
390     * serving as reverse forwarders to the old table. Because we are
391     * using power-of-two expansion, the elements from each bin must
392     * either stay at same index, or move with a power of two
393     * offset. We eliminate unnecessary node creation by catching
394     * cases where old nodes can be reused because their next fields
395     * won't change. On average, only about one-sixth of them need
396     * cloning when a table doubles. The nodes they replace will be
397     * garbage collectable as soon as they are no longer referenced by
398     * any reader thread that may be in the midst of concurrently
399     * traversing table. Upon transfer, the old table bin contains
400     * only a special forwarding node (with hash field "MOVED") that
401     * contains the next table as its key. On encountering a
402     * forwarding node, access and update operations restart, using
403     * the new table.
404     *
405     * Each bin transfer requires its bin lock, which can stall
406     * waiting for locks while resizing. However, because other
407     * threads can join in and help resize rather than contend for
408     * locks, average aggregate waits become shorter as resizing
409     * progresses. The transfer operation must also ensure that all
410     * accessible bins in both the old and new table are usable by any
411     * traversal. This is arranged by proceeding from the last bin
412     * (table.length - 1) up towards the first. Upon seeing a
413     * forwarding node, traversals (see class Traverser) arrange to
414     * move to the new table without revisiting nodes. However, to
415     * ensure that no intervening nodes are skipped, bin splitting can
416     * only begin after the associated reverse-forwarders are in
417     * place.
418 dl 1.14 *
419 dl 1.24 * The traversal scheme also applies to partial traversals of
420 dl 1.52 * ranges of bins (via an alternate Traverser constructor)
421 dl 1.41 * to support partitioned aggregate operations. Also, read-only
422     * operations give up if ever forwarded to a null table, which
423     * provides support for shutdown-style clearing, which is also not
424     * currently implemented.
425 dl 1.14 *
426     * Lazy table initialization minimizes footprint until first use,
427     * and also avoids resizings when the first operation is from a
428     * putAll, constructor with map argument, or deserialization.
429 dl 1.24 * These cases attempt to override the initial capacity settings,
430     * but harmlessly fail to take effect in cases of races.
431 dl 1.1 *
432 dl 1.82 * The element count is maintained using a specialization of
433     * LongAdder. We need to incorporate a specialization rather than
434     * just use a LongAdder in order to access implicit
435     * contention-sensing that leads to creation of multiple
436     * CounterCells. The counter mechanics avoid contention on
437     * updates but can encounter cache thrashing if read too
438     * frequently during concurrent access. To avoid reading so often,
439     * resizing under contention is attempted only upon adding to a
440     * bin already holding two or more nodes. Under uniform hash
441     * distributions, the probability of this occurring at threshold
442     * is around 13%, meaning that only about 1 in 8 puts check
443 dl 1.102 * threshold (and after resizing, many fewer do so).
444     *
445     * TreeBins use a special form of comparison for search and
446     * related operations (which is the main reason we cannot use
447     * existing collections such as TreeMaps). TreeBins contain
448     * Comparable elements, but may contain others, as well as
449     * elements that are Comparable but not necessarily Comparable
450     * for the same T, so we cannot invoke compareTo among them. To
451     * handle this, the tree is ordered primarily by hash value, then
452     * by Comparable.compareTo order if applicable. On lookup at a
453     * node, if elements are not comparable or compare as 0 then both
454     * left and right children may need to be searched in the case of
455     * tied hash values. (This corresponds to the full list search
456     * that would be necessary if all elements were non-Comparable and
457     * had tied hashes.) The red-black balancing code is updated from
458     * pre-jdk-collections
459     * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
460     * based in turn on Cormen, Leiserson, and Rivest "Introduction to
461     * Algorithms" (CLR).
462     *
463     * TreeBins also require an additional locking mechanism. While
464     * list traversal is always possible by readers even during
465     * updates, tree traversal is not, mainly because of tree-rotations
466     * that may change the root node and/or its linkages. TreeBins
467     * include a simple read-write lock mechanism parasitic on the
468     * main bin-synchronization strategy: Structural adjustments
469     * associated with an insertion or removal are already bin-locked
470     * (and so cannot conflict with other writers) but must wait for
471     * ongoing readers to finish. Since there can be only one such
472     * waiter, we use a simple scheme using a single "waiter" field to
473     * block writers. However, readers need never block. If the root
474     * lock is held, they proceed along the slow traversal path (via
475     * next-pointers) until the lock becomes available or the list is
476     * exhausted, whichever comes first. These cases are not fast, but
477     * maximize aggregate expected throughput.
478 dl 1.14 *
479     * Maintaining API and serialization compatibility with previous
480     * versions of this class introduces several oddities. Mainly: We
481     * leave untouched but unused constructor arguments referring to
482 dl 1.24 * concurrencyLevel. We accept a loadFactor constructor argument,
483     * but apply it only to initial table capacity (which is the only
484     * time that we can guarantee to honor it.) We also declare an
485     * unused "Segment" class that is instantiated in minimal form
486     * only when serializing.
487 dl 1.102 *
488     * This file is organized to make things a little easier to follow
489     * while reading than they might otherwise: First the main static
490     * declarations and utilities, then fields, then main public
491     * methods (with a few factorings of multiple public methods into
492     * internal ones), then sizing methods, trees, traversers, and
493     * bulk operations.
494 dl 1.1 */
495    
496     /* ---------------- Constants -------------- */
497    
498     /**
499 dl 1.16 * The largest possible table capacity. This value must be
500     * exactly 1<<30 to stay within Java array allocation and indexing
501 dl 1.24 * bounds for power of two table sizes, and is further required
502     * because the top two bits of 32bit hash fields are used for
503     * control purposes.
504 dl 1.1 */
505 dl 1.14 private static final int MAXIMUM_CAPACITY = 1 << 30;
506 dl 1.1
507     /**
508 dl 1.14 * The default initial table capacity. Must be a power of 2
509     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
510 dl 1.1 */
511 dl 1.14 private static final int DEFAULT_CAPACITY = 16;
512 dl 1.1
513     /**
514 dl 1.24 * The largest possible (non-power of two) array size.
515     * Needed by toArray and related methods.
516     */
517     static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
518    
519     /**
520     * The default concurrency level for this table. Unused but
521     * defined for compatibility with previous versions of this class.
522     */
523     private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
524    
525     /**
526 dl 1.16 * The load factor for this table. Overrides of this value in
527     * constructors affect only the initial table capacity. The
528 dl 1.24 * actual floating point value isn't normally used -- it is
529     * simpler to use expressions such as {@code n - (n >>> 2)} for
530     * the associated resizing threshold.
531 dl 1.1 */
532 dl 1.16 private static final float LOAD_FACTOR = 0.75f;
533 dl 1.1
534     /**
535 dl 1.38 * The bin count threshold for using a tree rather than list for a
536 dl 1.102 * bin. Bins are converted to trees when adding an element to a
537     * bin with at least this many nodes. The value must be greater
538     * than 2, and should be at least 8 to mesh with assumptions in
539     * tree removal about conversion back to plain bins upon
540     * shrinkage.
541     */
542     static final int TREEIFY_THRESHOLD = 8;
543    
544     /**
545     * The bin count threshold for untreeifying a (split) bin during a
546     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
547     * most 6 to mesh with shrinkage detection under removal.
548     */
549     static final int UNTREEIFY_THRESHOLD = 6;
550    
551     /**
552     * The smallest table capacity for which bins may be treeified.
553     * (Otherwise the table is resized if too many nodes in a bin.)
554     * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
555     * conflicts between resizing and treeification thresholds.
556 dl 1.38 */
557 dl 1.102 static final int MIN_TREEIFY_CAPACITY = 64;
558 dl 1.38
559 dl 1.82 /**
560     * Minimum number of rebinnings per transfer step. Ranges are
561     * subdivided to allow multiple resizer threads. This value
562     * serves as a lower bound to avoid resizers encountering
563     * excessive memory contention. The value should be at least
564     * DEFAULT_CAPACITY.
565     */
566     private static final int MIN_TRANSFER_STRIDE = 16;
567    
568 dl 1.24 /*
569 dl 1.82 * Encodings for Node hash fields. See above for explanation.
570 dl 1.1 */
571 dl 1.102 static final int MOVED = 0x8fffffff; // (-1) hash for forwarding nodes
572     static final int TREEBIN = 0x80000000; // hash for roots of trees
573     static final int RESERVED = 0x80000001; // hash for transient reservations
574 dl 1.82 static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
575    
576     /** Number of CPUS, to place bounds on some sizings */
577     static final int NCPU = Runtime.getRuntime().availableProcessors();
578    
579 dl 1.102 /** For serialization compatibility. */
580     private static final ObjectStreamField[] serialPersistentFields = {
581     new ObjectStreamField("segments", Segment[].class),
582     new ObjectStreamField("segmentMask", Integer.TYPE),
583     new ObjectStreamField("segmentShift", Integer.TYPE)
584     };
585    
586     /* ---------------- Nodes -------------- */
587    
588     /**
589     * Key-value entry. This class is never exported out as a
590     * user-mutable Map.Entry (i.e., one supporting setValue; see
591     * MapEntry below), but can be used for read-only traversals used
592     * in bulk tasks. Subclasses of Node with a negative hash field
593     * are special, and contain null keys and values (but are never
594     * exported). Otherwise, keys and vals are never null.
595     */
596     static class Node<K,V> implements Map.Entry<K,V> {
597     final int hash;
598     final K key;
599     volatile V val;
600     Node<K,V> next;
601    
602     Node(int hash, K key, V val, Node<K,V> next) {
603     this.hash = hash;
604     this.key = key;
605     this.val = val;
606     this.next = next;
607     }
608    
609     public final K getKey() { return key; }
610     public final V getValue() { return val; }
611     public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
612     public final String toString(){ return key + "=" + val; }
613     public final V setValue(V value) {
614     throw new UnsupportedOperationException();
615     }
616 dl 1.82
617 dl 1.102 public final boolean equals(Object o) {
618     Object k, v, u; Map.Entry<?,?> e;
619     return ((o instanceof Map.Entry) &&
620     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
621     (v = e.getValue()) != null &&
622     (k == key || k.equals(key)) &&
623     (v == (u = val) || v.equals(u)));
624     }
625 dl 1.82
626 dl 1.102 /**
627     * Virtualized support for map.get(); overridden in subclasses.
628     */
629     Node<K,V> find(int h, Object k) {
630     Node<K,V> e = this;
631     if (k != null) {
632     do {
633     K ek;
634     if (e.hash == h &&
635     ((ek = e.key) == k || (ek != null && k.equals(ek))))
636     return e;
637     } while ((e = e.next) != null);
638     }
639     return null;
640     }
641 dl 1.82 }
642    
643 dl 1.102 /* ---------------- Static utilities -------------- */
644    
645 dl 1.82 /**
646 dl 1.102 * Spreads (XORs) higher bits of hash to lower and also forces top
647     * bit to 0. Because the table uses power-of-two masking, sets of
648     * hashes that vary only in bits above the current mask will
649     * always collide. (Among known examples are sets of Float keys
650     * holding consecutive whole numbers in small tables.) So we
651     * apply a transform that spreads the impact of higher bits
652     * downward. There is a tradeoff between speed, utility, and
653     * quality of bit-spreading. Because many common sets of hashes
654     * are already reasonably distributed (so don't benefit from
655     * spreading), and because we use trees to handle large sets of
656     * collisions in bins, we just XOR some shifted bits in the
657     * cheapest possible way to reduce systematic lossage, as well as
658     * to incorporate impact of the highest bits that would otherwise
659     * never be used in index calculations because of table bounds.
660 dl 1.82 */
661 dl 1.102 static final int spread(int h) {
662     return (h ^ (h >>> 16)) & HASH_BITS;
663 dl 1.82 }
664    
665     /**
666 dl 1.102 * Returns a power of two table size for the given desired capacity.
667     * See Hackers Delight, sec 3.2
668 dl 1.82 */
669 dl 1.102 private static final int tableSizeFor(int c) {
670     int n = c - 1;
671     n |= n >>> 1;
672     n |= n >>> 2;
673     n |= n >>> 4;
674     n |= n >>> 8;
675     n |= n >>> 16;
676     return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
677     }
678 dl 1.82
679     /**
680 dl 1.102 * Returns x's Class if it is of the form "class C implements
681     * Comparable<C>", else null.
682 dl 1.82 */
683 dl 1.102 static Class<?> comparableClassFor(Object x) {
684     if (x instanceof Comparable) {
685     Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
686     if ((c = x.getClass()) == String.class) // bypass checks
687     return c;
688     if ((ts = c.getGenericInterfaces()) != null) {
689     for (int i = 0; i < ts.length; ++i) {
690     if (((t = ts[i]) instanceof ParameterizedType) &&
691     ((p = (ParameterizedType)t).getRawType() ==
692     Comparable.class) &&
693     (as = p.getActualTypeArguments()) != null &&
694     as.length == 1 && as[0] == c) // type arg is c
695     return c;
696     }
697     }
698     }
699     return null;
700     }
701 dl 1.82
702     /**
703 dl 1.102 * Returns k.compareTo(x) if x matches kc (k's screened comparable
704     * class), else 0.
705 dl 1.82 */
706 dl 1.102 @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
707     static int compareComparables(Class<?> kc, Object k, Object x) {
708     return (x == null || x.getClass() != kc ? 0 :
709     ((Comparable)k).compareTo(x));
710     }
711    
712     /* ---------------- Table element access -------------- */
713    
714     /*
715     * Volatile access methods are used for table elements as well as
716     * elements of in-progress next table while resizing. All uses of
717     * the tab arguments must be null checked by callers. All callers
718     * also paranoically precheck that tab's length is not zero (or an
719     * equivalent check), thus ensuring that any index argument taking
720     * the form of a hash value anded with (length - 1) is a valid
721     * index. Note that, to be correct wrt arbitrary concurrency
722     * errors by users, these checks must operate on local variables,
723     * which accounts for some odd-looking inline assignments below.
724     * Note that calls to setTabAt always occur within locked regions,
725     * and so do not need full volatile semantics, but still require
726     * ordering to maintain concurrent readability.
727     */
728    
    @SuppressWarnings("unchecked")
    // Volatile read of tab[i]; a plain array read is not guaranteed to see
    // the most recent bin head published by another thread.
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
    }
733 dl 1.24
    // Atomically installs v at tab[i] iff the current bin head is still c.
    // Used for lock-free insertion into an empty bin.
    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }
738 jsr166 1.103
    // Ordered (lazy) write of tab[i]: per the comment above, callers always
    // hold the bin lock, so full volatile semantics are unnecessary, but
    // ordering is still required for concurrent readability.
    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        U.putOrderedObject(tab, ((long)i << ASHIFT) + ABASE, v);
    }
742 jsr166 1.103
743 dl 1.24 /* ---------------- Fields -------------- */
744    
745     /**
746     * The array of bins. Lazily initialized upon first insertion.
747     * Size is always a power of two. Accessed directly by iterators.
748     */
749 dl 1.102 transient volatile Node<K,V>[] table;
750 dl 1.14
751 dl 1.16 /**
752 dl 1.82 * The next table to use; non-null only while resizing.
753     */
754 dl 1.102 private transient volatile Node<K,V>[] nextTable;
755 dl 1.82
756     /**
757     * Base counter value, used mainly when there is no contention,
758     * but also as a fallback during table initialization
759     * races. Updated via CAS.
760 dl 1.16 */
761 dl 1.82 private transient volatile long baseCount;
762 dl 1.24
763     /**
764     * Table initialization and resizing control. When negative, the
765 dl 1.82 * table is being initialized or resized: -1 for initialization,
766     * else -(1 + the number of active resizing threads). Otherwise,
767     * when table is null, holds the initial table size to use upon
768     * creation, or 0 for default. After initialization, holds the
769     * next element count value upon which to resize the table.
770 dl 1.24 */
771     private transient volatile int sizeCtl;
772    
773 dl 1.82 /**
774     * The next table index (plus one) to split while resizing.
775     */
776     private transient volatile int transferIndex;
777    
778     /**
779     * The least available table index to split while resizing.
780     */
781     private transient volatile int transferOrigin;
782    
783     /**
784 dl 1.102 * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
785 dl 1.82 */
786 dl 1.102 private transient volatile int cellsBusy;
787 dl 1.82
788     /**
789     * Table of counter cells. When non-null, size is a power of 2.
790     */
791     private transient volatile CounterCell[] counterCells;
792    
793 dl 1.24 // views
794 dl 1.70 private transient KeySetView<K,V> keySet;
795 dl 1.75 private transient ValuesView<K,V> values;
796     private transient EntrySetView<K,V> entrySet;
797 dl 1.24
798 dl 1.16
799 dl 1.102 /* ---------------- Public operations -------------- */
800 dl 1.38
801 dl 1.102 /**
802     * Creates a new, empty map with the default initial table size (16).
803     */
    public ConcurrentHashMapV8() {
        // sizeCtl stays 0: the table is allocated lazily on first insertion,
        // using the default capacity.
    }
806    
807 dl 1.102 /**
808     * Creates a new, empty map with an initial table size
809     * accommodating the specified number of elements without the need
810     * to dynamically resize.
811     *
812     * @param initialCapacity The implementation performs internal
813     * sizing to accommodate this many elements.
814     * @throws IllegalArgumentException if the initial capacity of
815     * elements is negative
816     */
817     public ConcurrentHashMapV8(int initialCapacity) {
818     if (initialCapacity < 0)
819     throw new IllegalArgumentException();
820     int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
821     MAXIMUM_CAPACITY :
822     tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
823     this.sizeCtl = cap;
824 dl 1.38 }
825    
826 dl 1.102 /**
827     * Creates a new map with the same mappings as the given map.
828     *
829     * @param m the map
830     */
    public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
        this.sizeCtl = DEFAULT_CAPACITY; // initial sizing hint; putAll presizes further
        putAll(m);
    }
835    
836 dl 1.102 /**
837     * Creates a new, empty map with an initial table size based on
838     * the given number of elements ({@code initialCapacity}) and
839     * initial table density ({@code loadFactor}).
840     *
841     * @param initialCapacity the initial capacity. The implementation
842     * performs internal sizing to accommodate this many elements,
843     * given the specified load factor.
844     * @param loadFactor the load factor (table density) for
845     * establishing the initial table size
846     * @throws IllegalArgumentException if the initial capacity of
847     * elements is negative or the load factor is nonpositive
848     *
849     * @since 1.6
850     */
    public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
        this(initialCapacity, loadFactor, 1); // concurrencyLevel 1: minimal sizing hint
    }
854 dl 1.1
855     /**
856 dl 1.102 * Creates a new, empty map with an initial table size based on
857     * the given number of elements ({@code initialCapacity}), table
858     * density ({@code loadFactor}), and number of concurrently
859     * updating threads ({@code concurrencyLevel}).
860     *
861     * @param initialCapacity the initial capacity. The implementation
862     * performs internal sizing to accommodate this many elements,
863     * given the specified load factor.
864     * @param loadFactor the load factor (table density) for
865     * establishing the initial table size
866     * @param concurrencyLevel the estimated number of concurrently
867     * updating threads. The implementation may use this value as
868     * a sizing hint.
869     * @throws IllegalArgumentException if the initial capacity is
870     * negative or the load factor or concurrencyLevel are
871     * nonpositive
872 dl 1.1 */
873 dl 1.102 public ConcurrentHashMapV8(int initialCapacity,
874     float loadFactor, int concurrencyLevel) {
875     if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
876     throw new IllegalArgumentException();
877     if (initialCapacity < concurrencyLevel) // Use at least as many bins
878     initialCapacity = concurrencyLevel; // as estimated threads
879     long size = (long)(1.0 + (long)initialCapacity / loadFactor);
880     int cap = (size >= (long)MAXIMUM_CAPACITY) ?
881     MAXIMUM_CAPACITY : tableSizeFor((int)size);
882     this.sizeCtl = cap;
883 dl 1.24 }
884 dl 1.1
885 dl 1.102 // Original (since JDK1.2) Map methods
886 dl 1.38
887     /**
888 dl 1.102 * {@inheritDoc}
889 dl 1.38 */
890 dl 1.102 public int size() {
891     long n = sumCount();
892     return ((n < 0L) ? 0 :
893     (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
894     (int)n);
895 dl 1.38 }
896 dl 1.1
897 dl 1.38 /**
898 dl 1.102 * {@inheritDoc}
899     */
900     public boolean isEmpty() {
901     return sumCount() <= 0L; // ignore transient negative values
902     }
903    
904     /**
905     * Returns the value to which the specified key is mapped,
906     * or {@code null} if this map contains no mapping for the key.
907 dl 1.38 *
908 dl 1.102 * <p>More formally, if this map contains a mapping from a key
909     * {@code k} to a value {@code v} such that {@code key.equals(k)},
910     * then this method returns {@code v}; otherwise it returns
911     * {@code null}. (There can be at most one such mapping.)
912 dl 1.38 *
913 dl 1.102 * @throws NullPointerException if the specified key is null
914 dl 1.1 */
915 dl 1.102 public V get(Object key) {
916     Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
917     int h = spread(key.hashCode());
918     if ((tab = table) != null && (n = tab.length) > 0 &&
919     (e = tabAt(tab, (n - 1) & h)) != null) {
920     if ((eh = e.hash) == h) {
921     if ((ek = e.key) == key || (ek != null && key.equals(ek)))
922     return e.val;
923     }
924     else if (eh < 0)
925     return (p = e.find(h, key)) != null ? p.val : null;
926     while ((e = e.next) != null) {
927     if (e.hash == h &&
928     ((ek = e.key) == key || (ek != null && key.equals(ek))))
929     return e.val;
930 dl 1.41 }
931     }
932 dl 1.102 return null;
933     }
934 dl 1.41
935 dl 1.102 /**
936     * Tests if the specified object is a key in this table.
937     *
938     * @param key possible key
939     * @return {@code true} if and only if the specified object
940     * is a key in this table, as determined by the
941     * {@code equals} method; {@code false} otherwise
942     * @throws NullPointerException if the specified key is null
943     */
    public boolean containsKey(Object key) {
        // Mapped values are never null, so a non-null get() result is
        // equivalent to key presence.
        return get(key) != null;
    }
947 dl 1.41
948 dl 1.102 /**
949     * Returns {@code true} if this map maps one or more keys to the
950     * specified value. Note: This method may require a full traversal
951     * of the map, and is much slower than method {@code containsKey}.
952     *
953     * @param value value whose presence in this map is to be tested
954     * @return {@code true} if this map maps one or more keys to the
955     * specified value
956     * @throws NullPointerException if the specified value is null
957     */
958     public boolean containsValue(Object value) {
959     if (value == null)
960     throw new NullPointerException();
961     Node<K,V>[] t;
962     if ((t = table) != null) {
963     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
964     for (Node<K,V> p; (p = it.advance()) != null; ) {
965     V v;
966     if ((v = p.val) == value || (v != null && value.equals(v)))
967     return true;
968 dl 1.38 }
969     }
970 dl 1.102 return false;
971     }
972 dl 1.38
973 dl 1.102 /**
974     * Maps the specified key to the specified value in this table.
975     * Neither the key nor the value can be null.
976     *
977     * <p>The value can be retrieved by calling the {@code get} method
978     * with a key that is equal to the original key.
979     *
980     * @param key key with which the specified value is to be associated
981     * @param value value to be associated with the specified key
982     * @return the previous value associated with {@code key}, or
983     * {@code null} if there was no mapping for {@code key}
984     * @throws NullPointerException if the specified key or value is null
985     */
    public V put(K key, V value) {
        return putVal(key, value, false); // false: overwrite an existing mapping
    }
989 dl 1.38
    /** Implementation for put and putIfAbsent */
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        if (key == null || value == null) throw new NullPointerException();
        int hash = spread(key.hashCode());
        int binCount = 0;   // nodes traversed in the bin; 0 means no lock was taken
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();   // lazy table creation on first insertion
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                // Empty bin: try lock-free CAS insert; on failure, retry loop.
                if (casTabAt(tab, i, null,
                             new Node<K,V>(hash, key, value, null)))
                    break;                   // no lock when adding to empty bin
            }
            else if ((fh = f.hash) == MOVED)
                // Table is being resized: help, then retry against new table.
                tab = helpTransfer(tab, f);
            else {
                V oldVal = null;
                synchronized (f) {           // lock the bin head
                    if (tabAt(tab, i) == f) {   // recheck head under lock
                        if (fh >= 0) {
                            // Plain linked-list bin: find key or append at tail.
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    pred.next = new Node<K,V>(hash, key,
                                                              value, null);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            // Tree bin: delegate insertion/lookup to the tree.
                            Node<K,V> p;
                            binCount = 2;   // nonzero, but never triggers treeify
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                                  value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    // Convert long list bins to trees past the threshold.
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (oldVal != null)
                        return oldVal;   // replaced: size unchanged, no addCount
                    break;
                }
            }
        }
        addCount(1L, binCount);   // new mapping added: bump count, maybe resize
        return null;
    }
1054    
1055 dl 1.14 /**
1056 dl 1.102 * Copies all of the mappings from the specified map to this one.
1057     * These mappings replace any mappings that this map had for any of the
1058     * keys currently in the specified map.
1059     *
1060     * @param m mappings to be stored in this map
1061 dl 1.14 */
1062 dl 1.102 public void putAll(Map<? extends K, ? extends V> m) {
1063     tryPresize(m.size());
1064     for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1065     putVal(e.getKey(), e.getValue(), false);
1066 dl 1.38 }
1067    
1068     /**
1069 dl 1.102 * Removes the key (and its corresponding value) from this map.
1070     * This method does nothing if the key is not in the map.
1071     *
1072     * @param key the key that needs to be removed
1073     * @return the previous value associated with {@code key}, or
1074     * {@code null} if there was no mapping for {@code key}
1075     * @throws NullPointerException if the specified key is null
1076 dl 1.38 */
    public V remove(Object key) {
        // value == null requests deletion; cv == null makes it unconditional.
        return replaceNode(key, null, null);
    }
1080    
    /**
     * Implementation for the four public remove/replace methods:
     * Replaces node value with v, conditional upon match of cv if
     * non-null. If resulting value is null, delete.
     */
    final V replaceNode(Object key, V value, Object cv) {
        int hash = spread(key.hashCode());
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0 ||
                (f = tabAt(tab, i = (n - 1) & hash)) == null)
                break;   // no table or empty bin: nothing to remove/replace
            else if ((fh = f.hash) == MOVED)
                // Resize in progress: help, then retry against the new table.
                tab = helpTransfer(tab, f);
            else {
                V oldVal = null;
                boolean validated = false;   // bin head unchanged under lock
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            // Plain list bin: scan, tracking the predecessor
                            // so an interior node can be unlinked.
                            validated = true;
                            for (Node<K,V> e = f, pred = null;;) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    V ev = e.val;
                                    // cv == null: unconditional match.
                                    if (cv == null || cv == ev ||
                                        (ev != null && cv.equals(ev))) {
                                        oldVal = ev;
                                        if (value != null)
                                            e.val = value;       // replace
                                        else if (pred != null)
                                            pred.next = e.next;  // unlink interior
                                        else
                                            setTabAt(tab, i, e.next); // unlink head
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        else if (f instanceof TreeBin) {
                            // Tree bin: find and (conditionally) replace/remove;
                            // shrink back to a list when the tree gets small.
                            validated = true;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(hash, key, null)) != null) {
                                V pv = p.val;
                                if (cv == null || cv == pv ||
                                    (pv != null && cv.equals(pv))) {
                                    oldVal = pv;
                                    if (value != null)
                                        p.val = value;
                                    else if (t.removeTreeNode(p))
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                    }
                }
                if (validated) {
                    if (oldVal != null) {
                        if (value == null)
                            addCount(-1L, -1);   // deletion: decrement count
                        return oldVal;
                    }
                    break;   // key absent (or condition failed): done
                }
                // Not validated: bin head changed before we locked; retry.
            }
        }
        return null;
    }
1156    
    /**
     * Removes all of the mappings from this map.
     */
    public void clear() {
        long delta = 0L; // negative number of deletions
        int i = 0;
        Node<K,V>[] tab = table;
        while (tab != null && i < tab.length) {
            int fh;
            Node<K,V> f = tabAt(tab, i);
            if (f == null)
                ++i;   // already empty bin
            else if ((fh = f.hash) == MOVED) {
                // Resize in progress: help, then rescan from the beginning
                // of the (new) table.
                tab = helpTransfer(tab, f);
                i = 0; // restart
            }
            else {
                synchronized (f) {   // lock the bin head before emptying it
                    if (tabAt(tab, i) == f) {
                        // Count the nodes being discarded (list or tree chain).
                        Node<K,V> p = (fh >= 0 ? f :
                                       (f instanceof TreeBin) ?
                                       ((TreeBin<K,V>)f).first : null);
                        while (p != null) {
                            --delta;
                            p = p.next;
                        }
                        setTabAt(tab, i++, null);
                    }
                }
            }
        }
        if (delta != 0L)
            addCount(delta, -1);   // -1: never trigger a resize check here
    }
1191    
1192     /**
1193     * Returns a {@link Set} view of the keys contained in this map.
1194     * The set is backed by the map, so changes to the map are
1195     * reflected in the set, and vice-versa. The set supports element
1196     * removal, which removes the corresponding mapping from this map,
1197     * via the {@code Iterator.remove}, {@code Set.remove},
1198     * {@code removeAll}, {@code retainAll}, and {@code clear}
1199     * operations. It does not support the {@code add} or
1200     * {@code addAll} operations.
1201     *
1202     * <p>The view's {@code iterator} is a "weakly consistent" iterator
1203     * that will never throw {@link ConcurrentModificationException},
1204     * and guarantees to traverse elements as they existed upon
1205     * construction of the iterator, and may (but is not guaranteed to)
1206     * reflect any modifications subsequent to construction.
1207     *
1208     * @return the set view
1209     */
1210     public KeySetView<K,V> keySet() {
1211     KeySetView<K,V> ks;
1212     return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
1213     }
1214    
1215     /**
1216     * Returns a {@link Collection} view of the values contained in this map.
1217     * The collection is backed by the map, so changes to the map are
1218     * reflected in the collection, and vice-versa. The collection
1219     * supports element removal, which removes the corresponding
1220     * mapping from this map, via the {@code Iterator.remove},
1221     * {@code Collection.remove}, {@code removeAll},
1222     * {@code retainAll}, and {@code clear} operations. It does not
1223     * support the {@code add} or {@code addAll} operations.
1224     *
1225     * <p>The view's {@code iterator} is a "weakly consistent" iterator
1226     * that will never throw {@link ConcurrentModificationException},
1227     * and guarantees to traverse elements as they existed upon
1228     * construction of the iterator, and may (but is not guaranteed to)
1229     * reflect any modifications subsequent to construction.
1230     *
1231     * @return the collection view
1232     */
1233     public Collection<V> values() {
1234     ValuesView<K,V> vs;
1235     return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
1236     }
1237    
1238     /**
1239     * Returns a {@link Set} view of the mappings contained in this map.
1240     * The set is backed by the map, so changes to the map are
1241     * reflected in the set, and vice-versa. The set supports element
1242     * removal, which removes the corresponding mapping from the map,
1243     * via the {@code Iterator.remove}, {@code Set.remove},
1244     * {@code removeAll}, {@code retainAll}, and {@code clear}
1245     * operations.
1246     *
1247     * <p>The view's {@code iterator} is a "weakly consistent" iterator
1248     * that will never throw {@link ConcurrentModificationException},
1249     * and guarantees to traverse elements as they existed upon
1250     * construction of the iterator, and may (but is not guaranteed to)
1251     * reflect any modifications subsequent to construction.
1252     *
1253     * @return the set view
1254     */
1255     public Set<Map.Entry<K,V>> entrySet() {
1256     EntrySetView<K,V> es;
1257     return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
1258 dl 1.27 }
1259    
1260 dl 1.102 /**
1261     * Returns the hash code value for this {@link Map}, i.e.,
1262     * the sum of, for each key-value pair in the map,
1263     * {@code key.hashCode() ^ value.hashCode()}.
1264     *
1265     * @return the hash code value for this map
1266 dl 1.27 */
1267 dl 1.102 public int hashCode() {
1268     int h = 0;
1269     Node<K,V>[] t;
1270     if ((t = table) != null) {
1271     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1272     for (Node<K,V> p; (p = it.advance()) != null; )
1273     h += p.key.hashCode() ^ p.val.hashCode();
1274     }
1275     return h;
1276     }
1277 dl 1.27
1278 dl 1.102 /**
1279     * Returns a string representation of this map. The string
1280     * representation consists of a list of key-value mappings (in no
1281     * particular order) enclosed in braces ("{@code {}}"). Adjacent
1282     * mappings are separated by the characters {@code ", "} (comma
1283     * and space). Each key-value mapping is rendered as the key
1284     * followed by an equals sign ("{@code =}") followed by the
1285     * associated value.
1286     *
1287     * @return a string representation of this map
1288     */
1289     public String toString() {
1290     Node<K,V>[] t;
1291     int f = (t = table) == null ? 0 : t.length;
1292     Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1293     StringBuilder sb = new StringBuilder();
1294     sb.append('{');
1295     Node<K,V> p;
1296     if ((p = it.advance()) != null) {
1297     for (;;) {
1298     K k = p.key;
1299     V v = p.val;
1300     sb.append(k == this ? "(this Map)" : k);
1301     sb.append('=');
1302     sb.append(v == this ? "(this Map)" : v);
1303     if ((p = it.advance()) == null)
1304     break;
1305     sb.append(',').append(' ');
1306 dl 1.14 }
1307 dl 1.102 }
1308     return sb.append('}').toString();
1309     }
1310    
1311     /**
1312     * Compares the specified object with this map for equality.
1313     * Returns {@code true} if the given object is a map with the same
1314     * mappings as this map. This operation may return misleading
1315     * results if either map is concurrently modified during execution
1316     * of this method.
1317     *
1318     * @param o object to be compared for equality with this map
1319     * @return {@code true} if the specified object is equal to this map
1320     */
1321     public boolean equals(Object o) {
1322     if (o != this) {
1323     if (!(o instanceof Map))
1324     return false;
1325     Map<?,?> m = (Map<?,?>) o;
1326     Node<K,V>[] t;
1327     int f = (t = table) == null ? 0 : t.length;
1328     Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1329     for (Node<K,V> p; (p = it.advance()) != null; ) {
1330     V val = p.val;
1331     Object v = m.get(p.key);
1332     if (v == null || (v != val && !v.equals(val)))
1333     return false;
1334 dl 1.38 }
1335 dl 1.102 for (Map.Entry<?,?> e : m.entrySet()) {
1336     Object mk, mv, v;
1337     if ((mk = e.getKey()) == null ||
1338     (mv = e.getValue()) == null ||
1339     (v = get(mk)) == null ||
1340     (mv != v && !mv.equals(v)))
1341     return false;
1342 dl 1.1 }
1343     }
1344 dl 1.102 return true;
1345     }
1346    
    /**
     * Stripped-down version of helper class used in previous version,
     * declared for the sake of serialization compatibility
     */
    static class Segment<K,V> extends ReentrantLock implements Serializable {
        private static final long serialVersionUID = 2249069246763182397L; // fixed legacy value
        final float loadFactor; // retained only for the legacy serial form
        Segment(float lf) { this.loadFactor = lf; }
    }
1356    
    /**
     * Saves the state of the {@code ConcurrentHashMapV8} instance to a
     * stream (i.e., serializes it).
     * @param s the stream
     * @serialData
     * the key (Object) and value (Object)
     * for each key-value mapping, followed by a null pair.
     * The key-value mappings are emitted in no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        // For serialization compatibility
        // Emulate segment calculation from previous version of this class
        int sshift = 0;
        int ssize = 1;
        while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
            ++sshift;
            ssize <<= 1;
        }
        int segmentShift = 32 - sshift;
        int segmentMask = ssize - 1;
        // Write the legacy fields declared in serialPersistentFields so old
        // readers can deserialize this stream.
        @SuppressWarnings("unchecked") Segment<K,V>[] segments = (Segment<K,V>[])
            new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
        for (int i = 0; i < segments.length; ++i)
            segments[i] = new Segment<K,V>(LOAD_FACTOR);
        s.putFields().put("segments", segments);
        s.putFields().put("segmentShift", segmentShift);
        s.putFields().put("segmentMask", segmentMask);
        s.writeFields();

        // Emit all mappings (weakly-consistent traversal), then a null pair
        // as the end-of-data sentinel.
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                s.writeObject(p.key);
                s.writeObject(p.val);
            }
        }
        s.writeObject(null);
        s.writeObject(null);
        segments = null; // throw away
    }
1399    
1400     /**
1401     * Reconstitutes the instance from a stream (that is, deserializes it).
1402     * @param s the stream
1403     */
1404     private void readObject(java.io.ObjectInputStream s)
1405     throws java.io.IOException, ClassNotFoundException {
1406     /*
1407     * To improve performance in typical cases, we create nodes
1408     * while reading, then place in table once size is known.
1409     * However, we must also validate uniqueness and deal with
1410     * overpopulated bins while doing so, which requires
1411     * specialized versions of putVal mechanics.
1412     */
1413     sizeCtl = -1; // force exclusion for table construction
1414     s.defaultReadObject();
1415     long size = 0L;
1416     Node<K,V> p = null;
1417     for (;;) {
1418     @SuppressWarnings("unchecked") K k = (K) s.readObject();
1419     @SuppressWarnings("unchecked") V v = (V) s.readObject();
1420     if (k != null && v != null) {
1421     p = new Node<K,V>(spread(k.hashCode()), k, v, p);
1422     ++size;
1423 dl 1.38 }
1424 dl 1.102 else
1425     break;
1426     }
1427     if (size == 0L)
1428     sizeCtl = 0;
1429     else {
1430     int n;
1431     if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
1432     n = MAXIMUM_CAPACITY;
1433 dl 1.27 else {
1434 dl 1.102 int sz = (int)size;
1435     n = tableSizeFor(sz + (sz >>> 1) + 1);
1436     }
1437     @SuppressWarnings({"rawtypes","unchecked"})
1438     Node<K,V>[] tab = (Node<K,V>[])new Node[n];
1439     int mask = n - 1;
1440     long added = 0L;
1441     while (p != null) {
1442     boolean insertAtFront;
1443     Node<K,V> next = p.next, first;
1444     int h = p.hash, j = h & mask;
1445     if ((first = tabAt(tab, j)) == null)
1446     insertAtFront = true;
1447     else {
1448     K k = p.key;
1449     if (first.hash < 0) {
1450     TreeBin<K,V> t = (TreeBin<K,V>)first;
1451     if (t.putTreeVal(h, k, p.val) == null)
1452     ++added;
1453     insertAtFront = false;
1454     }
1455     else {
1456     int binCount = 0;
1457     insertAtFront = true;
1458     Node<K,V> q; K qk;
1459     for (q = first; q != null; q = q.next) {
1460     if (q.hash == h &&
1461     ((qk = q.key) == k ||
1462     (qk != null && k.equals(qk)))) {
1463     insertAtFront = false;
1464 dl 1.82 break;
1465     }
1466 dl 1.102 ++binCount;
1467     }
1468     if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
1469     insertAtFront = false;
1470     ++added;
1471     p.next = first;
1472     TreeNode<K,V> hd = null, tl = null;
1473     for (q = p; q != null; q = q.next) {
1474     TreeNode<K,V> t = new TreeNode<K,V>
1475     (q.hash, q.key, q.val, null, null);
1476     if ((t.prev = tl) == null)
1477     hd = t;
1478     else
1479     tl.next = t;
1480     tl = t;
1481 dl 1.27 }
1482 dl 1.102 setTabAt(tab, j, new TreeBin<K,V>(hd));
1483 dl 1.27 }
1484 dl 1.38 }
1485 dl 1.82 }
1486 dl 1.102 if (insertAtFront) {
1487     ++added;
1488     p.next = first;
1489     setTabAt(tab, j, p);
1490     }
1491     p = next;
1492     }
1493     table = tab;
1494     sizeCtl = n - (n >>> 2);
1495     baseCount = added;
1496     }
1497     }
1498    
1499     // ConcurrentMap methods
1500    
1501     /**
1502     * {@inheritDoc}
1503     *
1504     * @return the previous value associated with the specified key,
1505     * or {@code null} if there was no mapping for the key
1506     * @throws NullPointerException if the specified key or value is null
1507     */
1508     public V putIfAbsent(K key, V value) {
1509     return putVal(key, value, true);
1510     }
1511    
1512     /**
1513     * {@inheritDoc}
1514     *
1515     * @throws NullPointerException if the specified key is null
1516     */
1517     public boolean remove(Object key, Object value) {
1518     if (key == null)
1519     throw new NullPointerException();
1520     return value != null && replaceNode(key, null, value) != null;
1521     }
1522    
1523     /**
1524     * {@inheritDoc}
1525     *
1526     * @throws NullPointerException if any of the arguments are null
1527     */
1528     public boolean replace(K key, V oldValue, V newValue) {
1529     if (key == null || oldValue == null || newValue == null)
1530     throw new NullPointerException();
1531     return replaceNode(key, newValue, oldValue) != null;
1532     }
1533    
1534     /**
1535     * {@inheritDoc}
1536     *
1537     * @return the previous value associated with the specified key,
1538     * or {@code null} if there was no mapping for the key
1539     * @throws NullPointerException if the specified key or value is null
1540     */
1541     public V replace(K key, V value) {
1542     if (key == null || value == null)
1543     throw new NullPointerException();
1544     return replaceNode(key, value, null);
1545     }
1546    
1547     // Overrides of JDK8+ Map extension method defaults
1548    
1549     /**
1550     * Returns the value to which the specified key is mapped, or the
1551     * given default value if this map contains no mapping for the
1552     * key.
1553     *
1554     * @param key the key whose associated value is to be returned
1555     * @param defaultValue the value to return if this map contains
1556     * no mapping for the given key
1557     * @return the mapping for the key, if present; else the default value
1558     * @throws NullPointerException if the specified key is null
1559     */
1560     public V getOrDefault(Object key, V defaultValue) {
1561     V v;
1562     return (v = get(key)) == null ? defaultValue : v;
1563     }
1564    
1565     public void forEach(BiAction<? super K, ? super V> action) {
1566     if (action == null) throw new NullPointerException();
1567     Node<K,V>[] t;
1568     if ((t = table) != null) {
1569     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1570     for (Node<K,V> p; (p = it.advance()) != null; ) {
1571     action.apply(p.key, p.val);
1572     }
1573     }
1574     }
1575    
    /**
     * Replaces each entry's value with the result of invoking the given
     * function on that entry's key and value, retrying each entry until
     * the replacement succeeds or the mapping disappears.
     *
     * @param function the function computing the replacement value
     * @throws NullPointerException if the function or any computed
     * replacement value is null
     */
    public void replaceAll(BiFun<? super K, ? super V, ? extends V> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V oldValue = p.val;
                for (K key = p.key;;) {
                    V newValue = function.apply(key, oldValue);
                    if (newValue == null)
                        throw new NullPointerException();
                    // CAS-style retry: replaceNode succeeds only if the value
                    // is still oldValue; otherwise re-read and try again,
                    // stopping if the mapping was concurrently removed.
                    if (replaceNode(key, newValue, oldValue) != null ||
                        (oldValue = get(key)) == null)
                        break;
                }
            }
        }
    }
1594    
1595 dl 1.102 /**
1596     * If the specified key is not already associated with a value,
1597     * attempts to compute its value using the given mapping function
1598     * and enters it into this map unless {@code null}. The entire
1599     * method invocation is performed atomically, so the function is
1600     * applied at most once per key. Some attempted update operations
1601     * on this map by other threads may be blocked while computation
1602     * is in progress, so the computation should be short and simple,
1603     * and must not attempt to update any other mappings of this map.
1604     *
1605     * @param key key with which the specified value is to be associated
1606     * @param mappingFunction the function to compute a value
1607     * @return the current (existing or computed) value associated with
1608     * the specified key, or null if the computed value is null
1609     * @throws NullPointerException if the specified key or mappingFunction
1610     * is null
1611     * @throws IllegalStateException if the computation detectably
1612     * attempts a recursive update to this map that would
1613     * otherwise never complete
1614     * @throws RuntimeException or Error if the mappingFunction does so,
1615     * in which case the mapping is left unestablished
1616     */
1617     public V computeIfAbsent(K key, Fun<? super K, ? extends V> mappingFunction) {
1618     if (key == null || mappingFunction == null)
1619 dl 1.82 throw new NullPointerException();
1620 dl 1.102 int h = spread(key.hashCode());
1621 dl 1.84 V val = null;
1622 dl 1.102 int binCount = 0;
1623     for (Node<K,V>[] tab = table;;) {
1624     Node<K,V> f; int n, i, fh;
1625     if (tab == null || (n = tab.length) == 0)
1626 dl 1.24 tab = initTable();
1627 dl 1.102 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1628     Node<K,V> r = new ReservationNode<K,V>();
1629     synchronized (r) {
1630     if (casTabAt(tab, i, null, r)) {
1631     binCount = 1;
1632     Node<K,V> node = null;
1633 dl 1.82 try {
1634 dl 1.102 if ((val = mappingFunction.apply(key)) != null)
1635     node = new Node<K,V>(h, key, val, null);
1636 dl 1.82 } finally {
1637 dl 1.102 setTabAt(tab, i, node);
1638 dl 1.1 }
1639     }
1640     }
1641 dl 1.102 if (binCount != 0)
1642 dl 1.10 break;
1643 dl 1.1 }
1644 dl 1.102 else if ((fh = f.hash) == MOVED)
1645     tab = helpTransfer(tab, f);
1646     else {
1647     boolean added = false;
1648     synchronized (f) {
1649     if (tabAt(tab, i) == f) {
1650     if (fh >= 0) {
1651     binCount = 1;
1652     for (Node<K,V> e = f;; ++binCount) {
1653     K ek; V ev;
1654     if (e.hash == h &&
1655     ((ek = e.key) == key ||
1656     (ek != null && key.equals(ek)))) {
1657     val = e.val;
1658     break;
1659     }
1660     Node<K,V> pred = e;
1661     if ((e = e.next) == null) {
1662     if ((val = mappingFunction.apply(key)) != null) {
1663     added = true;
1664     pred.next = new Node<K,V>(h, key, val, null);
1665     }
1666     break;
1667 dl 1.38 }
1668     }
1669 dl 1.102 }
1670     else if (f instanceof TreeBin) {
1671     binCount = 2;
1672     TreeBin<K,V> t = (TreeBin<K,V>)f;
1673     TreeNode<K,V> r, p;
1674     if ((r = t.root) != null &&
1675     (p = r.findTreeNode(h, key, null)) != null)
1676     val = p.val;
1677     else if ((val = mappingFunction.apply(key)) != null) {
1678     added = true;
1679     t.putTreeVal(h, key, val);
1680 dl 1.41 }
1681 dl 1.38 }
1682     }
1683     }
1684 dl 1.102 if (binCount != 0) {
1685     if (binCount >= TREEIFY_THRESHOLD)
1686     treeifyBin(tab, i);
1687     if (!added)
1688     return val;
1689     break;
1690     }
1691 dl 1.38 }
1692 dl 1.102 }
1693     if (val != null)
1694     addCount(1L, binCount);
1695     return val;
1696     }
1697    
1698     /**
1699     * If the value for the specified key is present, attempts to
1700     * compute a new mapping given the key and its current mapped
1701     * value. The entire method invocation is performed atomically.
1702     * Some attempted update operations on this map by other threads
1703     * may be blocked while computation is in progress, so the
1704     * computation should be short and simple, and must not attempt to
1705     * update any other mappings of this map.
1706     *
1707     * @param key key with which a value may be associated
1708     * @param remappingFunction the function to compute a value
1709     * @return the new value associated with the specified key, or null if none
1710     * @throws NullPointerException if the specified key or remappingFunction
1711     * is null
1712     * @throws IllegalStateException if the computation detectably
1713     * attempts a recursive update to this map that would
1714     * otherwise never complete
1715     * @throws RuntimeException or Error if the remappingFunction does so,
1716     * in which case the mapping is unchanged
1717     */
1718     public V computeIfPresent(K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
1719     if (key == null || remappingFunction == null)
1720     throw new NullPointerException();
1721     int h = spread(key.hashCode());
1722     V val = null;
1723     int delta = 0;
1724     int binCount = 0;
1725     for (Node<K,V>[] tab = table;;) {
1726     Node<K,V> f; int n, i, fh;
1727     if (tab == null || (n = tab.length) == 0)
1728     tab = initTable();
1729     else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
1730     break;
1731     else if ((fh = f.hash) == MOVED)
1732     tab = helpTransfer(tab, f);
1733 dl 1.82 else {
1734 jsr166 1.83 synchronized (f) {
1735 dl 1.24 if (tabAt(tab, i) == f) {
1736 dl 1.102 if (fh >= 0) {
1737     binCount = 1;
1738     for (Node<K,V> e = f, pred = null;; ++binCount) {
1739     K ek;
1740     if (e.hash == h &&
1741     ((ek = e.key) == key ||
1742     (ek != null && key.equals(ek)))) {
1743     val = remappingFunction.apply(key, e.val);
1744     if (val != null)
1745     e.val = val;
1746     else {
1747     delta = -1;
1748     Node<K,V> en = e.next;
1749     if (pred != null)
1750     pred.next = en;
1751     else
1752     setTabAt(tab, i, en);
1753     }
1754     break;
1755     }
1756     pred = e;
1757     if ((e = e.next) == null)
1758     break;
1759     }
1760     }
1761     else if (f instanceof TreeBin) {
1762     binCount = 2;
1763     TreeBin<K,V> t = (TreeBin<K,V>)f;
1764     TreeNode<K,V> r, p;
1765     if ((r = t.root) != null &&
1766     (p = r.findTreeNode(h, key, null)) != null) {
1767     val = remappingFunction.apply(key, p.val);
1768 dl 1.27 if (val != null)
1769 dl 1.102 p.val = val;
1770 dl 1.41 else {
1771     delta = -1;
1772 dl 1.102 if (t.removeTreeNode(p))
1773     setTabAt(tab, i, untreeify(t.first));
1774 dl 1.1 }
1775     }
1776     }
1777     }
1778     }
1779 dl 1.102 if (binCount != 0)
1780 dl 1.10 break;
1781 dl 1.1 }
1782 dl 1.10 }
1783 dl 1.82 if (delta != 0)
1784 dl 1.102 addCount((long)delta, binCount);
1785 dl 1.84 return val;
1786 dl 1.1 }
1787    
1788 dl 1.102 /**
1789     * Attempts to compute a mapping for the specified key and its
1790     * current mapped value (or {@code null} if there is no current
1791     * mapping). The entire method invocation is performed atomically.
1792     * Some attempted update operations on this map by other threads
1793     * may be blocked while computation is in progress, so the
1794     * computation should be short and simple, and must not attempt to
1795     * update any other mappings of this Map.
1796     *
1797     * @param key key with which the specified value is to be associated
1798     * @param remappingFunction the function to compute a value
1799     * @return the new value associated with the specified key, or null if none
1800     * @throws NullPointerException if the specified key or remappingFunction
1801     * is null
1802     * @throws IllegalStateException if the computation detectably
1803     * attempts a recursive update to this map that would
1804     * otherwise never complete
1805     * @throws RuntimeException or Error if the remappingFunction does so,
1806     * in which case the mapping is unchanged
1807     */
1808     public V compute(K key,
1809     BiFun<? super K, ? super V, ? extends V> remappingFunction) {
1810     if (key == null || remappingFunction == null)
1811 dl 1.82 throw new NullPointerException();
1812 dl 1.102 int h = spread(key.hashCode());
1813 dl 1.84 V val = null;
1814 dl 1.52 int delta = 0;
1815 dl 1.102 int binCount = 0;
1816     for (Node<K,V>[] tab = table;;) {
1817     Node<K,V> f; int n, i, fh;
1818     if (tab == null || (n = tab.length) == 0)
1819 dl 1.52 tab = initTable();
1820 dl 1.102 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1821     Node<K,V> r = new ReservationNode<K,V>();
1822     synchronized (r) {
1823     if (casTabAt(tab, i, null, r)) {
1824     binCount = 1;
1825     Node<K,V> node = null;
1826     try {
1827     if ((val = remappingFunction.apply(key, null)) != null) {
1828     delta = 1;
1829     node = new Node<K,V>(h, key, val, null);
1830     }
1831     } finally {
1832     setTabAt(tab, i, node);
1833     }
1834     }
1835     }
1836     if (binCount != 0)
1837 dl 1.52 break;
1838     }
1839 dl 1.102 else if ((fh = f.hash) == MOVED)
1840     tab = helpTransfer(tab, f);
1841     else {
1842     synchronized (f) {
1843     if (tabAt(tab, i) == f) {
1844     if (fh >= 0) {
1845     binCount = 1;
1846     for (Node<K,V> e = f, pred = null;; ++binCount) {
1847     K ek;
1848     if (e.hash == h &&
1849     ((ek = e.key) == key ||
1850     (ek != null && key.equals(ek)))) {
1851     val = remappingFunction.apply(key, e.val);
1852     if (val != null)
1853     e.val = val;
1854     else {
1855     delta = -1;
1856     Node<K,V> en = e.next;
1857     if (pred != null)
1858     pred.next = en;
1859     else
1860     setTabAt(tab, i, en);
1861     }
1862     break;
1863     }
1864     pred = e;
1865     if ((e = e.next) == null) {
1866     val = remappingFunction.apply(key, null);
1867     if (val != null) {
1868     delta = 1;
1869     pred.next =
1870     new Node<K,V>(h, key, val, null);
1871     }
1872     break;
1873     }
1874     }
1875     }
1876     else if (f instanceof TreeBin) {
1877     binCount = 1;
1878     TreeBin<K,V> t = (TreeBin<K,V>)f;
1879     TreeNode<K,V> r, p;
1880     if ((r = t.root) != null)
1881     p = r.findTreeNode(h, key, null);
1882     else
1883     p = null;
1884     V pv = (p == null) ? null : p.val;
1885     val = remappingFunction.apply(key, pv);
1886 dl 1.52 if (val != null) {
1887     if (p != null)
1888     p.val = val;
1889     else {
1890     delta = 1;
1891 dl 1.102 t.putTreeVal(h, key, val);
1892 dl 1.52 }
1893     }
1894     else if (p != null) {
1895     delta = -1;
1896 dl 1.102 if (t.removeTreeNode(p))
1897     setTabAt(tab, i, untreeify(t.first));
1898 dl 1.52 }
1899     }
1900     }
1901     }
1902 dl 1.102 if (binCount != 0) {
1903     if (binCount >= TREEIFY_THRESHOLD)
1904     treeifyBin(tab, i);
1905     break;
1906 dl 1.52 }
1907     }
1908     }
1909 dl 1.82 if (delta != 0)
1910 dl 1.102 addCount((long)delta, binCount);
1911 dl 1.84 return val;
1912 dl 1.52 }
1913    
1914 dl 1.102 /**
1915     * If the specified key is not already associated with a
1916     * (non-null) value, associates it with the given value.
1917     * Otherwise, replaces the value with the results of the given
1918     * remapping function, or removes if {@code null}. The entire
1919     * method invocation is performed atomically. Some attempted
1920     * update operations on this map by other threads may be blocked
1921     * while computation is in progress, so the computation should be
1922     * short and simple, and must not attempt to update any other
1923     * mappings of this Map.
1924     *
1925     * @param key key with which the specified value is to be associated
1926     * @param value the value to use if absent
1927     * @param remappingFunction the function to recompute a value if present
1928     * @return the new value associated with the specified key, or null if none
1929     * @throws NullPointerException if the specified key or the
1930     * remappingFunction is null
1931     * @throws RuntimeException or Error if the remappingFunction does so,
1932     * in which case the mapping is unchanged
1933     */
1934     public V merge(K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
1935     if (key == null || value == null || remappingFunction == null)
1936     throw new NullPointerException();
1937     int h = spread(key.hashCode());
1938     V val = null;
1939     int delta = 0;
1940     int binCount = 0;
1941     for (Node<K,V>[] tab = table;;) {
1942     Node<K,V> f; int n, i, fh;
1943     if (tab == null || (n = tab.length) == 0)
1944     tab = initTable();
1945     else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1946     if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) {
1947     delta = 1;
1948     val = value;
1949 dl 1.27 break;
1950     }
1951 dl 1.102 }
1952     else if ((fh = f.hash) == MOVED)
1953     tab = helpTransfer(tab, f);
1954     else {
1955     synchronized (f) {
1956     if (tabAt(tab, i) == f) {
1957     if (fh >= 0) {
1958     binCount = 1;
1959     for (Node<K,V> e = f, pred = null;; ++binCount) {
1960     K ek;
1961     if (e.hash == h &&
1962     ((ek = e.key) == key ||
1963     (ek != null && key.equals(ek)))) {
1964     val = remappingFunction.apply(e.val, value);
1965     if (val != null)
1966     e.val = val;
1967 dl 1.38 else {
1968 dl 1.102 delta = -1;
1969     Node<K,V> en = e.next;
1970     if (pred != null)
1971     pred.next = en;
1972     else
1973     setTabAt(tab, i, en);
1974 dl 1.38 }
1975 dl 1.102 break;
1976     }
1977     pred = e;
1978     if ((e = e.next) == null) {
1979     delta = 1;
1980     val = value;
1981     pred.next =
1982     new Node<K,V>(h, key, val, null);
1983     break;
1984 dl 1.38 }
1985     }
1986     }
1987 dl 1.102 else if (f instanceof TreeBin) {
1988     binCount = 2;
1989     TreeBin<K,V> t = (TreeBin<K,V>)f;
1990     TreeNode<K,V> r = t.root;
1991     TreeNode<K,V> p = (r == null) ? null :
1992     r.findTreeNode(h, key, null);
1993     val = (p == null) ? value :
1994     remappingFunction.apply(p.val, value);
1995     if (val != null) {
1996     if (p != null)
1997     p.val = val;
1998     else {
1999     delta = 1;
2000     t.putTreeVal(h, key, val);
2001 dl 1.27 }
2002     }
2003 dl 1.102 else if (p != null) {
2004     delta = -1;
2005     if (t.removeTreeNode(p))
2006     setTabAt(tab, i, untreeify(t.first));
2007 dl 1.90 }
2008 dl 1.24 }
2009     }
2010 dl 1.1 }
2011 dl 1.102 if (binCount != 0) {
2012     if (binCount >= TREEIFY_THRESHOLD)
2013     treeifyBin(tab, i);
2014     break;
2015     }
2016 dl 1.1 }
2017     }
2018 dl 1.102 if (delta != 0)
2019     addCount((long)delta, binCount);
2020     return val;
2021 dl 1.1 }
2022    
2023 dl 1.102 // Hashtable legacy methods
2024    
2025 dl 1.82 /**
2026 dl 1.102 * Legacy method testing if some key maps into the specified value
2027     * in this table. This method is identical in functionality to
2028     * {@link #containsValue(Object)}, and exists solely to ensure
2029     * full compatibility with class {@link java.util.Hashtable},
2030     * which supported this method prior to introduction of the
2031     * Java Collections framework.
2032     *
2033     * @param value a value to search for
2034     * @return {@code true} if and only if some key maps to the
2035     * {@code value} argument in this table as
2036     * determined by the {@code equals} method;
2037     * {@code false} otherwise
2038     * @throws NullPointerException if the specified value is null
2039     */
2040     @Deprecated public boolean contains(Object value) {
2041     return containsValue(value);
2042     }
2043    
2044     /**
2045     * Returns an enumeration of the keys in this table.
2046     *
2047     * @return an enumeration of the keys in this table
2048     * @see #keySet()
2049     */
2050     public Enumeration<K> keys() {
2051     Node<K,V>[] t;
2052     int f = (t = table) == null ? 0 : t.length;
2053     return new KeyIterator<K,V>(t, f, 0, f, this);
2054     }
2055    
2056     /**
2057     * Returns an enumeration of the values in this table.
2058     *
2059     * @return an enumeration of the values in this table
2060     * @see #values()
2061     */
2062     public Enumeration<V> elements() {
2063     Node<K,V>[] t;
2064     int f = (t = table) == null ? 0 : t.length;
2065     return new ValueIterator<K,V>(t, f, 0, f, this);
2066     }
2067    
2068     // ConcurrentHashMapV8-only methods
2069    
2070     /**
2071     * Returns the number of mappings. This method should be used
2072     * instead of {@link #size} because a ConcurrentHashMapV8 may
2073     * contain more mappings than can be represented as an int. The
2074     * value returned is an estimate; the actual count may differ if
2075     * there are concurrent insertions or removals.
2076     *
2077     * @return the number of mappings
2078     * @since 1.8
2079     */
2080     public long mappingCount() {
2081     long n = sumCount();
2082     return (n < 0L) ? 0L : n; // ignore transient negative values
2083     }
2084    
2085     /**
2086     * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2087     * from the given type to {@code Boolean.TRUE}.
2088     *
2089     * @return the new set
2090     * @since 1.8
2091     */
2092     public static <K> KeySetView<K,Boolean> newKeySet() {
2093     return new KeySetView<K,Boolean>
2094     (new ConcurrentHashMapV8<K,Boolean>(), Boolean.TRUE);
2095     }
2096    
2097     /**
2098     * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2099     * from the given type to {@code Boolean.TRUE}.
2100     *
2101     * @param initialCapacity The implementation performs internal
2102     * sizing to accommodate this many elements.
2103     * @throws IllegalArgumentException if the initial capacity of
2104     * elements is negative
2105     * @return the new set
2106     * @since 1.8
2107     */
2108     public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2109     return new KeySetView<K,Boolean>
2110     (new ConcurrentHashMapV8<K,Boolean>(initialCapacity), Boolean.TRUE);
2111     }
2112    
2113     /**
2114     * Returns a {@link Set} view of the keys in this map, using the
2115     * given common mapped value for any additions (i.e., {@link
2116     * Collection#add} and {@link Collection#addAll(Collection)}).
2117     * This is of course only appropriate if it is acceptable to use
2118     * the same value for all additions from this view.
2119     *
2120     * @param mappedValue the mapped value to use for any additions
2121     * @return the set view
2122     * @throws NullPointerException if the mappedValue is null
2123 dl 1.82 */
2124 dl 1.102 public KeySetView<K,V> keySet(V mappedValue) {
2125     if (mappedValue == null)
2126     throw new NullPointerException();
2127     return new KeySetView<K,V>(this, mappedValue);
2128     }
2129    
2130     /* ---------------- Special Nodes -------------- */
2131    
2132     /**
2133     * A node inserted at head of bins during transfer operations.
2134     */
2135     static final class ForwardingNode<K,V> extends Node<K,V> {
2136     final Node<K,V>[] nextTable;
2137     ForwardingNode(Node<K,V>[] tab) {
2138     super(MOVED, null, null, null);
2139     this.nextTable = tab;
2140     }
2141    
2142     Node<K,V> find(int h, Object k) {
2143     Node<K,V> e; int n;
2144     Node<K,V>[] tab = nextTable;
2145     if (k != null && tab != null && (n = tab.length) > 0 &&
2146     (e = tabAt(tab, (n - 1) & h)) != null) {
2147     do {
2148     int eh; K ek;
2149     if ((eh = e.hash) == h &&
2150     ((ek = e.key) == k || (ek != null && k.equals(ek))))
2151     return e;
2152     if (eh < 0)
2153     return e.find(h, k);
2154     } while ((e = e.next) != null);
2155 dl 1.82 }
2156 dl 1.102 return null;
2157 dl 1.82 }
2158     }
2159    
2160 dl 1.24 /**
2161 dl 1.102 * A place-holder node used in computeIfAbsent and compute
2162 dl 1.24 */
2163 dl 1.102 static final class ReservationNode<K,V> extends Node<K,V> {
2164     ReservationNode() {
2165     super(RESERVED, null, null, null);
2166     }
2167    
2168     Node<K,V> find(int h, Object k) {
2169     return null;
2170     }
2171 dl 1.24 }
2172    
2173 dl 1.102 /* ---------------- Table Initialization and Resizing -------------- */
2174    
2175 dl 1.24 /**
2176     * Initializes table, using the size recorded in sizeCtl.
2177     */
2178 dl 1.102 private final Node<K,V>[] initTable() {
2179     Node<K,V>[] tab; int sc;
2180     while ((tab = table) == null || tab.length == 0) {
2181 dl 1.24 if ((sc = sizeCtl) < 0)
2182     Thread.yield(); // lost initialization race; just spin
2183 dl 1.82 else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
2184 dl 1.24 try {
2185 dl 1.102 if ((tab = table) == null || tab.length == 0) {
2186 dl 1.24 int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2187 dl 1.102 @SuppressWarnings({"rawtypes","unchecked"})
2188     Node<K,V>[] nt = (Node<K,V>[])new Node[n];
2189     table = tab = nt;
2190 dl 1.27 sc = n - (n >>> 2);
2191 dl 1.24 }
2192     } finally {
2193     sizeCtl = sc;
2194     }
2195     break;
2196     }
2197     }
2198     return tab;
2199     }
2200    
2201     /**
2202 dl 1.82 * Adds to count, and if table is too small and not already
2203     * resizing, initiates transfer. If already resizing, helps
2204     * perform transfer if work is available. Rechecks occupancy
2205     * after a transfer to see if another resize is already needed
2206     * because resizings are lagging additions.
2207     *
2208     * @param x the count to add
2209     * @param check if <0, don't check resize, if <= 1 only check if uncontended
2210     */
2211     private final void addCount(long x, int check) {
2212     CounterCell[] as; long b, s;
2213     if ((as = counterCells) != null ||
2214     !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
2215     CounterHashCode hc; CounterCell a; long v; int m;
2216     boolean uncontended = true;
2217     if ((hc = threadCounterHashCode.get()) == null ||
2218     as == null || (m = as.length - 1) < 0 ||
2219     (a = as[m & hc.code]) == null ||
2220     !(uncontended =
2221     U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
2222     fullAddCount(x, hc, uncontended);
2223     return;
2224     }
2225     if (check <= 1)
2226     return;
2227     s = sumCount();
2228     }
2229     if (check >= 0) {
2230 dl 1.102 Node<K,V>[] tab, nt; int sc;
2231 dl 1.82 while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
2232     tab.length < MAXIMUM_CAPACITY) {
2233     if (sc < 0) {
2234     if (sc == -1 || transferIndex <= transferOrigin ||
2235     (nt = nextTable) == null)
2236     break;
2237     if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
2238     transfer(tab, nt);
2239 dl 1.24 }
2240 dl 1.82 else if (U.compareAndSwapInt(this, SIZECTL, sc, -2))
2241     transfer(tab, null);
2242     s = sumCount();
2243 dl 1.24 }
2244     }
2245     }
2246    
2247 dl 1.27 /**
2248 dl 1.102 * Helps transfer if a resize is in progress.
2249     */
2250     final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
2251     Node<K,V>[] nextTab; int sc;
2252     if ((f instanceof ForwardingNode) &&
2253     (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
2254     if (nextTab == nextTable && tab == table &&
2255     transferIndex > transferOrigin && (sc = sizeCtl) < -1 &&
2256     U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
2257     transfer(tab, nextTab);
2258     return nextTab;
2259     }
2260     return table;
2261     }
2262    
2263     /**
2264 dl 1.27 * Tries to presize table to accommodate the given number of elements.
2265     *
2266     * @param size number of elements (doesn't need to be perfectly accurate)
2267     */
2268 dl 1.102 private final void tryPresize(int size) {
2269 dl 1.27 int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2270     tableSizeFor(size + (size >>> 1) + 1);
2271     int sc;
2272     while ((sc = sizeCtl) >= 0) {
2273 dl 1.102 Node<K,V>[] tab = table; int n;
2274 dl 1.27 if (tab == null || (n = tab.length) == 0) {
2275 jsr166 1.30 n = (sc > c) ? sc : c;
2276 dl 1.82 if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
2277 dl 1.27 try {
2278     if (table == tab) {
2279 dl 1.102 @SuppressWarnings({"rawtypes","unchecked"})
2280     Node<K,V>[] nt = (Node<K,V>[])new Node[n];
2281     table = nt;
2282 dl 1.27 sc = n - (n >>> 2);
2283     }
2284     } finally {
2285     sizeCtl = sc;
2286     }
2287     }
2288     }
2289     else if (c <= sc || n >= MAXIMUM_CAPACITY)
2290     break;
2291 dl 1.82 else if (tab == table &&
2292     U.compareAndSwapInt(this, SIZECTL, sc, -2))
2293     transfer(tab, null);
2294 dl 1.27 }
2295     }
2296    
    /**
     * Moves and/or copies the nodes in each bin to new table. See
     * above for explanation.
     */
    private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
        int n = tab.length, stride;
        if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
            stride = MIN_TRANSFER_STRIDE; // subdivide range
        if (nextTab == null) {            // initiating
            try {
                @SuppressWarnings({"rawtypes","unchecked"})
                Node<K,V>[] nt = (Node<K,V>[])new Node[n << 1];
                nextTab = nt;
            } catch (Throwable ex) {      // try to cope with OOME
                sizeCtl = Integer.MAX_VALUE;
                return;
            }
            nextTable = nextTab;
            transferOrigin = n;
            transferIndex = n;
            // Pre-fill not-yet-transferable slots of the new table with a
            // forwarder back to the OLD table, revealed stride-by-stride
            // from the top down, so readers landing in nextTab early are
            // redirected correctly.
            ForwardingNode<K,V> rev = new ForwardingNode<K,V>(tab);
            for (int k = n; k > 0;) {    // progressively reveal ready slots
                int nextk = (k > stride) ? k - stride : 0;
                for (int m = nextk; m < k; ++m)
                    nextTab[m] = rev;
                for (int m = n + nextk; m < n + k; ++m)
                    nextTab[m] = rev;
                // ordered write: slots above k must be visible before origin moves
                U.putOrderedInt(this, TRANSFERORIGIN, k = nextk);
            }
        }
        int nextn = nextTab.length;
        ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
        boolean advance = true;
        for (int i = 0, bound = 0;;) {
            int nextIndex, nextBound, fh; Node<K,V> f;
            // Claim the next bin to process: either step down within our
            // claimed [bound, i] range, or CAS transferIndex to claim a
            // fresh stride; i < 0 signals "no work left for this thread".
            while (advance) {
                if (--i >= bound)
                    advance = false;
                else if ((nextIndex = transferIndex) <= transferOrigin) {
                    i = -1;
                    advance = false;
                }
                else if (U.compareAndSwapInt
                         (this, TRANSFERINDEX, nextIndex,
                          nextBound = (nextIndex > stride ?
                                       nextIndex - stride : 0))) {
                    bound = nextBound;
                    i = nextIndex - 1;
                    advance = false;
                }
            }
            if (i < 0 || i >= n || i + n >= nextn) {
                // Done with our portion: decrement the resizer count in
                // sizeCtl; the last one out (sc == -1) commits the new table.
                for (int sc;;) {
                    if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) {
                        if (sc == -1) {
                            nextTable = null;
                            table = nextTab;
                            sizeCtl = (n << 1) - (n >>> 1); // 0.75 * new capacity
                        }
                        return;
                    }
                }
            }
            else if ((f = tabAt(tab, i)) == null) {
                // Empty bin: plant the forwarder via CAS (no lock needed).
                if (casTabAt(tab, i, null, fwd)) {
                    setTabAt(nextTab, i, null);
                    setTabAt(nextTab, i + n, null);
                    advance = true;
                }
            }
            else if ((fh = f.hash) == MOVED)
                advance = true; // already processed
            else {
                synchronized (f) { // lock bin head while splitting it
                    if (tabAt(tab, i) == f) { // recheck: bin unchanged
                        Node<K,V> ln, hn; // low (index i) and high (index i+n) lists
                        if (fh >= 0) { // plain linked bin
                            // Find lastRun: the longest reusable tail whose
                            // nodes all go to the same half; clone only the
                            // nodes before it.
                            int runBit = fh & n;
                            Node<K,V> lastRun = f;
                            for (Node<K,V> p = f.next; p != null; p = p.next) {
                                int b = p.hash & n;
                                if (b != runBit) {
                                    runBit = b;
                                    lastRun = p;
                                }
                            }
                            if (runBit == 0) {
                                ln = lastRun;
                                hn = null;
                            }
                            else {
                                hn = lastRun;
                                ln = null;
                            }
                            for (Node<K,V> p = f; p != lastRun; p = p.next) {
                                int ph = p.hash; K pk = p.key; V pv = p.val;
                                if ((ph & n) == 0)
                                    ln = new Node<K,V>(ph, pk, pv, ln);
                                else
                                    hn = new Node<K,V>(ph, pk, pv, hn);
                            }
                        }
                        else if (f instanceof TreeBin) {
                            // Split the tree's node list into low/high lists,
                            // counting each so we can decide whether the halves
                            // stay trees or revert to plain lists.
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> lo = null, loTail = null;
                            TreeNode<K,V> hi = null, hiTail = null;
                            int lc = 0, hc = 0;
                            for (Node<K,V> e = t.first; e != null; e = e.next) {
                                int h = e.hash;
                                TreeNode<K,V> p = new TreeNode<K,V>
                                    (h, e.key, e.val, null, null);
                                if ((h & n) == 0) {
                                    if ((p.prev = loTail) == null)
                                        lo = p;
                                    else
                                        loTail.next = p;
                                    loTail = p;
                                    ++lc;
                                }
                                else {
                                    if ((p.prev = hiTail) == null)
                                        hi = p;
                                    else
                                        hiTail.next = p;
                                    hiTail = p;
                                    ++hc;
                                }
                            }
                            // If a half is small, untreeify; if the other half
                            // is empty, the original bin can be reused as-is.
                            ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                                (hc != 0) ? new TreeBin<K,V>(lo) : t;
                            hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                                (lc != 0) ? new TreeBin<K,V>(hi) : t;
                        }
                        else
                            ln = hn = null;
                        setTabAt(nextTab, i, ln);
                        setTabAt(nextTab, i + n, hn);
                        setTabAt(tab, i, fwd); // publish forwarder last
                        advance = true;
                    }
                }
            }
        }
    }
2441    
2442 dl 1.102 /* ---------------- Conversion from/to TreeBins -------------- */
2443 dl 1.82
    /**
     * Replaces all linked nodes in bin at given index unless table is
     * too small, in which case resizes instead.
     */
    private final void treeifyBin(Node<K,V>[] tab, int index) {
        Node<K,V> b; int n, sc;
        if (tab != null) {
            if ((n = tab.length) < MIN_TREEIFY_CAPACITY) {
                // Table too small to justify trees: resize instead, which
                // spreads the overfull bin's nodes over more slots.
                if (tab == table && (sc = sizeCtl) >= 0 &&
                    U.compareAndSwapInt(this, SIZECTL, sc, -2))
                    transfer(tab, null);
            }
            else if ((b = tabAt(tab, index)) != null) {
                synchronized (b) { // lock bin head while converting
                    if (tabAt(tab, index) == b) { // recheck: bin unchanged
                        // Build a doubly-linked TreeNode list preserving bin
                        // order, then install a TreeBin constructed over it.
                        TreeNode<K,V> hd = null, tl = null;
                        for (Node<K,V> e = b; e != null; e = e.next) {
                            TreeNode<K,V> p =
                                new TreeNode<K,V>(e.hash, e.key, e.val,
                                                  null, null);
                            if ((p.prev = tl) == null)
                                hd = p;
                            else
                                tl.next = p;
                            tl = p;
                        }
                        setTabAt(tab, index, new TreeBin<K,V>(hd));
                    }
                }
            }
        }
    }
2476    
2477 dl 1.102 /**
2478     * Returns a list on non-TreeNodes replacing those in given list
2479     */
2480     static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2481     Node<K,V> hd = null, tl = null;
2482     for (Node<K,V> q = b; q != null; q = q.next) {
2483     Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
2484     if (tl == null)
2485     hd = p;
2486     else
2487     tl.next = p;
2488     tl = p;
2489     }
2490     return hd;
2491     }
2492    
2493     /* ---------------- TreeNodes -------------- */
2494 dl 1.14
    /**
     * Nodes for use in TreeBins
     */
    static final class TreeNode<K,V> extends Node<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;

        TreeNode(int hash, K key, V val, Node<K,V> next,
                 TreeNode<K,V> parent) {
            super(hash, key, val, next);
            this.parent = parent;
        }

        Node<K,V> find(int h, Object k) {
            return findTreeNode(h, k, null);
        }

        /**
         * Returns the TreeNode (or null if not found) for the given key
         * starting at given root.
         */
        final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
            if (k != null) {
                TreeNode<K,V> p = this;
                do {
                    int ph, dir; K pk; TreeNode<K,V> q;
                    TreeNode<K,V> pl = p.left, pr = p.right;
                    if ((ph = p.hash) > h)  // tree is ordered primarily by hash
                        p = pl;
                    else if (ph < h)
                        p = pr;
                    else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                        return p;           // exact key match
                    else if (pl == null && pr == null)
                        break;
                    else if ((kc != null ||
                              (kc = comparableClassFor(k)) != null) &&
                             (dir = compareComparables(kc, k, pk)) != 0)
                        p = (dir < 0) ? pl : pr;  // hash tie broken by Comparable
                    else if (pl == null)
                        p = pr;
                    else if (pr == null ||
                             (q = pr.findTreeNode(h, k, kc)) == null)
                        p = pl;             // incomparable tie: must search both subtrees
                    else
                        return q;
                } while (p != null);
            }
            return null;
        }
    }
2549    
2550     /* ---------------- TreeBins -------------- */
2551    
2552     /**
2553     * TreeNodes used at the heads of bins. TreeBins do not hold user
2554     * keys or values, but instead point to list of TreeNodes and
2555     * their root. They also maintain a parasitic read-write lock
2556     * forcing writers (who hold bin lock) to wait for readers (who do
2557     * not) to complete before tree restructuring operations.
2558     */
2559     static final class TreeBin<K,V> extends Node<K,V> {
2560     TreeNode<K,V> root;
2561     volatile TreeNode<K,V> first;
2562     volatile Thread waiter;
2563     volatile int lockState;
2564     // values for lockState
2565     static final int WRITER = 1; // set while holding write lock
2566     static final int WAITER = 2; // set when waiting for write lock
2567     static final int READER = 4; // increment value for setting read lock
2568    
2569     /**
2570     * Creates bin with initial set of nodes headed by b.
2571     */
2572     TreeBin(TreeNode<K,V> b) {
2573     super(TREEBIN, null, null, null);
2574     this.first = b;
2575     TreeNode<K,V> r = null;
2576     for (TreeNode<K,V> x = b, next; x != null; x = next) {
2577     next = (TreeNode<K,V>)x.next;
2578     x.left = x.right = null;
2579     if (r == null) {
2580     x.parent = null;
2581     x.red = false;
2582     r = x;
2583     }
2584     else {
2585     Object key = x.key;
2586     int hash = x.hash;
2587     Class<?> kc = null;
2588     for (TreeNode<K,V> p = r;;) {
2589     int dir, ph;
2590     if ((ph = p.hash) > hash)
2591     dir = -1;
2592     else if (ph < hash)
2593     dir = 1;
2594     else if ((kc != null ||
2595     (kc = comparableClassFor(key)) != null))
2596     dir = compareComparables(kc, key, p.key);
2597     else
2598     dir = 0;
2599     TreeNode<K,V> xp = p;
2600     if ((p = (dir <= 0) ? p.left : p.right) == null) {
2601     x.parent = xp;
2602     if (dir <= 0)
2603     xp.left = x;
2604     else
2605     xp.right = x;
2606     r = balanceInsertion(r, x);
2607     break;
2608     }
2609     }
2610     }
2611     }
2612     this.root = r;
2613     }
2614    
2615     /**
2616     * Acquires write lock for tree restructuring
2617     */
2618     private final void lockRoot() {
2619     if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
2620     contendedLock(); // offload to separate method
2621     }
2622    
2623     /**
2624     * Releases write lock for tree restructuring
2625     */
2626     private final void unlockRoot() {
2627     lockState = 0;
2628     }
2629 dl 1.14
2630 dl 1.102 /**
2631     * Possibly blocks awaiting root lock
2632     */
2633     private final void contendedLock() {
2634     boolean waiting = false;
2635     for (int s;;) {
2636     if (((s = lockState) & WRITER) == 0) {
2637     if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
2638     if (waiting)
2639     waiter = null;
2640     return;
2641     }
2642     }
2643     else if ((s | WAITER) == 0) {
2644     if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
2645     waiting = true;
2646     waiter = Thread.currentThread();
2647     }
2648     }
2649     else if (waiting)
2650     LockSupport.park(this);
2651     }
2652 dl 1.14 }
2653    
2654 dl 1.102 /**
2655     * Returns matching node or null if none. Tries to search
2656     * using tree compareisons from root, but continues linear
2657     * search when lock not available.
2658     */
2659     final Node<K,V> find(int h, Object k) {
2660     if (k != null) {
2661     for (Node<K,V> e = first; e != null; e = e.next) {
2662     int s; K ek;
2663     if (((s = lockState) & (WAITER|WRITER)) != 0) {
2664     if (e.hash == h &&
2665     ((ek = e.key) == k || (ek != null && k.equals(ek))))
2666     return e;
2667     }
2668     else if (U.compareAndSwapInt(this, LOCKSTATE, s,
2669     s + READER)) {
2670     TreeNode<K,V> r, p;
2671     try {
2672     p = ((r = root) == null ? null :
2673     r.findTreeNode(h, k, null));
2674     } finally {
2675     Thread w;
2676     int ls;
2677     do {} while (!U.compareAndSwapInt
2678     (this, LOCKSTATE,
2679     ls = lockState, ls - READER));
2680     if (ls == (READER|WAITER) && (w = waiter) != null)
2681     LockSupport.unpark(w);
2682     }
2683     return p;
2684     }
2685     }
2686 dl 1.79 }
2687 dl 1.102 return null;
2688 dl 1.41 }
2689    
2690     /**
2691 dl 1.102 * Finds or adds a node.
2692     * @return null if added
2693 dl 1.41 */
2694 dl 1.102 final TreeNode<K,V> putTreeVal(int h, K k, V v) {
2695     Class<?> kc = null;
2696     for (TreeNode<K,V> p = root;;) {
2697     int dir, ph; K pk; TreeNode<K,V> q, pr;
2698     if (p == null) {
2699     first = root = new TreeNode<K,V>(h, k, v, null, null);
2700     break;
2701     }
2702     else if ((ph = p.hash) > h)
2703     dir = -1;
2704     else if (ph < h)
2705     dir = 1;
2706     else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2707     return p;
2708     else if ((kc == null &&
2709     (kc = comparableClassFor(k)) == null) ||
2710     (dir = compareComparables(kc, k, pk)) == 0) {
2711     if (p.left == null)
2712     dir = 1;
2713     else if ((pr = p.right) == null ||
2714     (q = pr.findTreeNode(h, k, kc)) == null)
2715     dir = -1;
2716     else
2717     return q;
2718     }
2719     TreeNode<K,V> xp = p;
2720     if ((p = (dir < 0) ? p.left : p.right) == null) {
2721     TreeNode<K,V> x, f = first;
2722     first = x = new TreeNode<K,V>(h, k, v, f, xp);
2723     if (f != null)
2724     f.prev = x;
2725     if (dir < 0)
2726     xp.left = x;
2727 dl 1.63 else
2728 dl 1.102 xp.right = x;
2729     if (!xp.red)
2730     x.red = true;
2731     else {
2732     lockRoot();
2733     try {
2734     root = balanceInsertion(root, x);
2735     } finally {
2736     unlockRoot();
2737 dl 1.38 }
2738 dl 1.102 }
2739     break;
2740 dl 1.1 }
2741 dl 1.102 }
2742     assert checkInvariants(root);
2743     return null;
2744 dl 1.1 }
2745 dl 1.41
2746 dl 1.102 /**
2747     * Removes the given node, that must be present before this
2748     * call. This is messier than typical red-black deletion code
2749     * because we cannot swap the contents of an interior node
2750     * with a leaf successor that is pinned by "next" pointers
2751     * that are accessible independently of lock. So instead we
2752     * swap the tree linkages.
2753     *
2754     * @return true if now too small so should be untreeified.
2755     */
2756     final boolean removeTreeNode(TreeNode<K,V> p) {
2757     TreeNode<K,V> next = (TreeNode<K,V>)p.next;
2758     TreeNode<K,V> pred = p.prev; // unlink traversal pointers
2759     TreeNode<K,V> r, rl;
2760     if (pred == null)
2761     first = next;
2762     else
2763     pred.next = next;
2764     if (next != null)
2765     next.prev = pred;
2766     if (first == null) {
2767     root = null;
2768     return true;
2769     }
2770     if ((r = root) == null || r.right == null || // too small
2771     (rl = r.left) == null || rl.left == null)
2772     return true;
2773     lockRoot();
2774     try {
2775     TreeNode<K,V> replacement;
2776     TreeNode<K,V> pl = p.left;
2777     TreeNode<K,V> pr = p.right;
2778     if (pl != null && pr != null) {
2779     TreeNode<K,V> s = pr, sl;
2780     while ((sl = s.left) != null) // find successor
2781     s = sl;
2782     boolean c = s.red; s.red = p.red; p.red = c; // swap colors
2783     TreeNode<K,V> sr = s.right;
2784     TreeNode<K,V> pp = p.parent;
2785     if (s == pr) { // p was s's direct parent
2786     p.parent = s;
2787     s.right = p;
2788     }
2789     else {
2790     TreeNode<K,V> sp = s.parent;
2791     if ((p.parent = sp) != null) {
2792     if (s == sp.left)
2793     sp.left = p;
2794     else
2795     sp.right = p;
2796     }
2797     if ((s.right = pr) != null)
2798     pr.parent = s;
2799     }
2800     p.left = null;
2801     if ((p.right = sr) != null)
2802     sr.parent = p;
2803     if ((s.left = pl) != null)
2804     pl.parent = s;
2805     if ((s.parent = pp) == null)
2806     r = s;
2807     else if (p == pp.left)
2808     pp.left = s;
2809     else
2810     pp.right = s;
2811     if (sr != null)
2812     replacement = sr;
2813     else
2814     replacement = p;
2815     }
2816     else if (pl != null)
2817     replacement = pl;
2818     else if (pr != null)
2819     replacement = pr;
2820     else
2821     replacement = p;
2822     if (replacement != p) {
2823     TreeNode<K,V> pp = replacement.parent = p.parent;
2824     if (pp == null)
2825     r = replacement;
2826     else if (p == pp.left)
2827     pp.left = replacement;
2828     else
2829     pp.right = replacement;
2830     p.left = p.right = p.parent = null;
2831     }
2832    
2833     root = (p.red) ? r : balanceDeletion(r, replacement);
2834    
2835     if (p == replacement) { // detach pointers
2836     TreeNode<K,V> pp;
2837     if ((pp = p.parent) != null) {
2838     if (p == pp.left)
2839     pp.left = null;
2840     else if (p == pp.right)
2841     pp.right = null;
2842     p.parent = null;
2843     }
2844     }
2845     } finally {
2846     unlockRoot();
2847     }
2848     assert checkInvariants(root);
2849     return false;
2850 dl 1.41 }
2851    
2852 dl 1.102 /* ------------------------------------------------------------ */
2853     // Red-black tree methods, all adapted from CLR
2854    
2855     static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
2856     TreeNode<K,V> p) {
2857     TreeNode<K,V> r, pp, rl;
2858     if (p != null && (r = p.right) != null) {
2859     if ((rl = p.right = r.left) != null)
2860     rl.parent = p;
2861     if ((pp = r.parent = p.parent) == null)
2862     (root = r).red = false;
2863     else if (pp.left == p)
2864     pp.left = r;
2865     else
2866     pp.right = r;
2867     r.left = p;
2868     p.parent = r;
2869     }
2870     return root;
2871 dl 1.41 }
2872    
2873 dl 1.102 static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
2874     TreeNode<K,V> p) {
2875     TreeNode<K,V> l, pp, lr;
2876     if (p != null && (l = p.left) != null) {
2877     if ((lr = p.left = l.right) != null)
2878     lr.parent = p;
2879     if ((pp = l.parent = p.parent) == null)
2880     (root = l).red = false;
2881     else if (pp.right == p)
2882     pp.right = l;
2883     else
2884     pp.left = l;
2885     l.right = p;
2886     p.parent = l;
2887     }
2888     return root;
2889     }
2890 dl 1.79
2891 dl 1.102 static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
2892     TreeNode<K,V> x) {
2893     x.red = true;
2894     for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
2895     if ((xp = x.parent) == null) {
2896     x.red = false;
2897     return x;
2898     }
2899     else if (!xp.red || (xpp = xp.parent) == null)
2900     return root;
2901     if (xp == (xppl = xpp.left)) {
2902     if ((xppr = xpp.right) != null && xppr.red) {
2903     xppr.red = false;
2904     xp.red = false;
2905     xpp.red = true;
2906     x = xpp;
2907     }
2908     else {
2909     if (x == xp.right) {
2910     root = rotateLeft(root, x = xp);
2911     xpp = (xp = x.parent) == null ? null : xp.parent;
2912     }
2913     if (xp != null) {
2914     xp.red = false;
2915     if (xpp != null) {
2916     xpp.red = true;
2917     root = rotateRight(root, xpp);
2918     }
2919     }
2920     }
2921     }
2922     else {
2923     if (xppl != null && xppl.red) {
2924     xppl.red = false;
2925     xp.red = false;
2926     xpp.red = true;
2927     x = xpp;
2928     }
2929     else {
2930     if (x == xp.left) {
2931     root = rotateRight(root, x = xp);
2932     xpp = (xp = x.parent) == null ? null : xp.parent;
2933     }
2934     if (xp != null) {
2935     xp.red = false;
2936     if (xpp != null) {
2937     xpp.red = true;
2938     root = rotateLeft(root, xpp);
2939     }
2940     }
2941     }
2942     }
2943     }
2944     }
2945 dl 1.79
2946 dl 1.102 static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
2947     TreeNode<K,V> x) {
2948     for (TreeNode<K,V> xp, xpl, xpr;;) {
2949     if (x == null || x == root)
2950     return root;
2951     else if ((xp = x.parent) == null) {
2952     x.red = false;
2953     return x;
2954     }
2955     else if (x.red) {
2956     x.red = false;
2957     return root;
2958     }
2959     else if ((xpl = xp.left) == x) {
2960     if ((xpr = xp.right) != null && xpr.red) {
2961     xpr.red = false;
2962     xp.red = true;
2963     root = rotateLeft(root, xp);
2964     xpr = (xp = x.parent) == null ? null : xp.right;
2965     }
2966     if (xpr == null)
2967     x = xp;
2968     else {
2969     TreeNode<K,V> sl = xpr.left, sr = xpr.right;
2970     if ((sr == null || !sr.red) &&
2971     (sl == null || !sl.red)) {
2972     xpr.red = true;
2973     x = xp;
2974     }
2975     else {
2976     if (sr == null || !sr.red) {
2977     if (sl != null)
2978     sl.red = false;
2979     xpr.red = true;
2980     root = rotateRight(root, xpr);
2981     xpr = (xp = x.parent) == null ?
2982     null : xp.right;
2983     }
2984     if (xpr != null) {
2985     xpr.red = (xp == null) ? false : xp.red;
2986     if ((sr = xpr.right) != null)
2987     sr.red = false;
2988     }
2989     if (xp != null) {
2990     xp.red = false;
2991     root = rotateLeft(root, xp);
2992     }
2993     x = root;
2994     }
2995     }
2996     }
2997     else { // symmetric
2998     if (xpl != null && xpl.red) {
2999     xpl.red = false;
3000     xp.red = true;
3001     root = rotateRight(root, xp);
3002     xpl = (xp = x.parent) == null ? null : xp.left;
3003     }
3004     if (xpl == null)
3005     x = xp;
3006     else {
3007     TreeNode<K,V> sl = xpl.left, sr = xpl.right;
3008     if ((sl == null || !sl.red) &&
3009     (sr == null || !sr.red)) {
3010     xpl.red = true;
3011     x = xp;
3012     }
3013     else {
3014     if (sl == null || !sl.red) {
3015     if (sr != null)
3016     sr.red = false;
3017     xpl.red = true;
3018     root = rotateLeft(root, xpl);
3019     xpl = (xp = x.parent) == null ?
3020     null : xp.left;
3021     }
3022     if (xpl != null) {
3023     xpl.red = (xp == null) ? false : xp.red;
3024     if ((sl = xpl.left) != null)
3025     sl.red = false;
3026     }
3027     if (xp != null) {
3028     xp.red = false;
3029     root = rotateRight(root, xp);
3030     }
3031     x = root;
3032     }
3033     }
3034     }
3035     }
3036     }
3037 jsr166 1.103
3038 dl 1.79 /**
3039 dl 1.102 * Recursive invariant check
3040 dl 1.79 */
3041 dl 1.102 static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
3042     TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
3043     tb = t.prev, tn = (TreeNode<K,V>)t.next;
3044     if (tb != null && tb.next != t)
3045     return false;
3046     if (tn != null && tn.prev != t)
3047     return false;
3048     if (tp != null && t != tp.left && t != tp.right)
3049     return false;
3050     if (tl != null && (tl.parent != t || tl.hash > t.hash))
3051     return false;
3052     if (tr != null && (tr.parent != t || tr.hash < t.hash))
3053     return false;
3054     if (t.red && tl != null && tl.red && tr != null && tr.red)
3055     return false;
3056     if (tl != null && !checkInvariants(tl))
3057     return false;
3058     if (tr != null && !checkInvariants(tr))
3059     return false;
3060     return true;
3061 dl 1.79 }
3062    
3063 dl 1.102 private static final sun.misc.Unsafe U;
3064     private static final long LOCKSTATE;
3065     static {
3066     try {
3067     U = getUnsafe();
3068     Class<?> k = TreeBin.class;
3069     LOCKSTATE = U.objectFieldOffset
3070     (k.getDeclaredField("lockState"));
3071     } catch (Exception e) {
3072     throw new Error(e);
3073     }
3074     }
3075 dl 1.1 }
3076    
3077 dl 1.102 /* ----------------Table Traversal -------------- */
3078 dl 1.1
    /**
     * Encapsulates traversal for methods such as containsValue; also
     * serves as a base class for other iterators and spliterators.
     *
     * Method advance visits once each still-valid node that was
     * reachable upon iterator construction. It might miss some that
     * were added to a bin after the bin was visited, which is OK wrt
     * consistency guarantees. Maintaining this property in the face
     * of possible ongoing resizes requires a fair amount of
     * bookkeeping state that is difficult to optimize away amidst
     * volatile accesses.  Even so, traversal maintains reasonable
     * throughput.
     *
     * Normally, iteration proceeds bin-by-bin traversing lists.
     * However, if the table has been resized, then all future steps
     * must traverse both the bin at the current index as well as at
     * (index + baseSize); and so on for further resizings. To
     * paranoically cope with potential sharing by users of iterators
     * across threads, iteration terminates if a bounds checks fails
     * for a table read.
     */
    static class Traverser<K,V> {
        Node<K,V>[] tab;        // current table; updated if resized
        Node<K,V> next;         // the next entry to use
        int index;              // index of bin to use next
        int baseIndex;          // current index of initial table
        int baseLimit;          // index bound for initial table
        final int baseSize;     // initial table size

        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
            this.tab = tab;
            this.baseSize = size;
            this.baseIndex = this.index = index;
            this.baseLimit = limit;
            this.next = null;
        }

        /**
         * Advances if possible, returning next valid node, or null if none.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next; // continue within the current bin first
            for (;;) {
                Node<K,V>[] t; int i, n; K ek; // must use locals in checks
                if (e != null)
                    return next = e;
                // terminate past the limit, or on any out-of-bounds read
                // (defensive: possible only if the iterator is shared)
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                if ((e = tabAt(t, index)) != null && e.hash < 0) {
                    // negative hash marks a special (non-mapping) node
                    if (e instanceof ForwardingNode) {
                        tab = ((ForwardingNode<K,V>)e).nextTable; // follow resize
                        e = null;
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first; // walk tree via its node list
                    else
                        e = null;
                }
                if ((index += baseSize) >= n)
                    index = ++baseIndex;    // visit upper slots if present
            }
        }
    }
3146    
3147     /**
3148 dl 1.102 * Base of key, value, and entry Iterators. Adds fields to
3149     * Traverser to support iterator.remove
3150 dl 1.75 */
3151 dl 1.102 static class BaseIterator<K,V> extends Traverser<K,V> {
3152     final ConcurrentHashMapV8<K,V> map;
3153     Node<K,V> lastReturned;
3154     BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
3155     ConcurrentHashMapV8<K,V> map) {
3156     super(tab, size, index, limit);
3157     this.map = map;
3158     advance();
3159     }
3160    
3161     public final boolean hasNext() { return next != null; }
3162     public final boolean hasMoreElements() { return next != null; }
3163    
3164     public final void remove() {
3165     Node<K,V> p;
3166     if ((p = lastReturned) == null)
3167     throw new IllegalStateException();
3168     lastReturned = null;
3169     map.replaceNode(p.key, null, null);
3170     }
3171 dl 1.75 }
3172    
3173 dl 1.102 static final class KeyIterator<K,V> extends BaseIterator<K,V>
3174     implements Iterator<K>, Enumeration<K> {
3175     KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
3176     ConcurrentHashMapV8<K,V> map) {
3177     super(tab, index, size, limit, map);
3178     }
3179    
3180     public final K next() {
3181     Node<K,V> p;
3182     if ((p = next) == null)
3183     throw new NoSuchElementException();
3184     K k = p.key;
3185     lastReturned = p;
3186     advance();
3187     return k;
3188     }
3189    
3190     public final K nextElement() { return next(); }
3191 dl 1.75 }
3192    
3193 dl 1.102 static final class ValueIterator<K,V> extends BaseIterator<K,V>
3194     implements Iterator<V>, Enumeration<V> {
3195     ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
3196     ConcurrentHashMapV8<K,V> map) {
3197     super(tab, index, size, limit, map);
3198     }
3199    
3200     public final V next() {
3201     Node<K,V> p;
3202     if ((p = next) == null)
3203     throw new NoSuchElementException();
3204     V v = p.val;
3205     lastReturned = p;
3206     advance();
3207     return v;
3208 dl 1.84 }
3209 dl 1.102
3210     public final V nextElement() { return next(); }
3211 dl 1.75 }
3212    
3213 dl 1.102 static final class EntryIterator<K,V> extends BaseIterator<K,V>
3214     implements Iterator<Map.Entry<K,V>> {
3215     EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
3216     ConcurrentHashMapV8<K,V> map) {
3217     super(tab, index, size, limit, map);
3218     }
3219    
3220     public final Map.Entry<K,V> next() {
3221     Node<K,V> p;
3222     if ((p = next) == null)
3223     throw new NoSuchElementException();
3224     K k = p.key;
3225     V v = p.val;
3226     lastReturned = p;
3227     advance();
3228     return new MapEntry<K,V>(k, v, map);
3229 dl 1.84 }
3230 dl 1.75 }
3231    
3232     /**
3233 dl 1.102 * Exported Entry for EntryIterator
3234 dl 1.75 */
3235 dl 1.102 static final class MapEntry<K,V> implements Map.Entry<K,V> {
3236     final K key; // non-null
3237     V val; // non-null
3238     final ConcurrentHashMapV8<K,V> map;
3239     MapEntry(K key, V val, ConcurrentHashMapV8<K,V> map) {
3240     this.key = key;
3241     this.val = val;
3242     this.map = map;
3243     }
3244     public K getKey() { return key; }
3245     public V getValue() { return val; }
3246     public int hashCode() { return key.hashCode() ^ val.hashCode(); }
3247     public String toString() { return key + "=" + val; }
3248    
3249     public boolean equals(Object o) {
3250     Object k, v; Map.Entry<?,?> e;
3251     return ((o instanceof Map.Entry) &&
3252     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3253     (v = e.getValue()) != null &&
3254     (k == key || k.equals(key)) &&
3255     (v == val || v.equals(val)));
3256     }
3257    
3258     /**
3259     * Sets our entry's value and writes through to the map. The
3260     * value to return is somewhat arbitrary here. Since we do not
3261     * necessarily track asynchronous changes, the most recent
3262     * "previous" value could be different from what we return (or
3263     * could even have been removed, in which case the put will
3264     * re-establish). We do not and cannot guarantee more.
3265     */
3266     public V setValue(V value) {
3267     if (value == null) throw new NullPointerException();
3268     V v = val;
3269     val = value;
3270     map.put(key, value);
3271     return v;
3272 dl 1.84 }
3273 dl 1.75 }
3274    
3275 dl 1.102 static final class KeySpliterator<K,V> extends Traverser<K,V>
3276     implements ConcurrentHashMapSpliterator<K> {
3277     long est; // size estimate
3278     KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3279     long est) {
3280     super(tab, size, index, limit);
3281     this.est = est;
3282     }
3283    
3284     public ConcurrentHashMapSpliterator<K> trySplit() {
3285     int i, f, h;
3286     return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3287     new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
3288     f, est >>>= 1);
3289     }
3290    
3291     public void forEachRemaining(Action<? super K> action) {
3292     if (action == null) throw new NullPointerException();
3293     for (Node<K,V> p; (p = advance()) != null;)
3294     action.apply(p.key);
3295     }
3296    
3297     public boolean tryAdvance(Action<? super K> action) {
3298     if (action == null) throw new NullPointerException();
3299     Node<K,V> p;
3300     if ((p = advance()) == null)
3301     return false;
3302     action.apply(p.key);
3303     return true;
3304 dl 1.84 }
3305 dl 1.102
3306     public long estimateSize() { return est; }
3307    
3308 dl 1.75 }
3309    
3310 dl 1.102 static final class ValueSpliterator<K,V> extends Traverser<K,V>
3311     implements ConcurrentHashMapSpliterator<V> {
3312     long est; // size estimate
3313     ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
3314     long est) {
3315     super(tab, size, index, limit);
3316     this.est = est;
3317     }
3318    
3319     public ConcurrentHashMapSpliterator<V> trySplit() {
3320     int i, f, h;
3321     return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3322     new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
3323     f, est >>>= 1);
3324     }
3325    
3326     public void forEachRemaining(Action<? super V> action) {
3327     if (action == null) throw new NullPointerException();
3328     for (Node<K,V> p; (p = advance()) != null;)
3329     action.apply(p.val);
3330     }
3331    
3332     public boolean tryAdvance(Action<? super V> action) {
3333     if (action == null) throw new NullPointerException();
3334     Node<K,V> p;
3335     if ((p = advance()) == null)
3336     return false;
3337     action.apply(p.val);
3338     return true;
3339     }
3340    
3341     public long estimateSize() { return est; }
3342    
3343 dl 1.75 }
3344    
3345 dl 1.102 static final class EntrySpliterator<K,V> extends Traverser<K,V>
3346     implements ConcurrentHashMapSpliterator<Map.Entry<K,V>> {
3347     final ConcurrentHashMapV8<K,V> map; // To export MapEntry
3348     long est; // size estimate
3349     EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3350     long est, ConcurrentHashMapV8<K,V> map) {
3351     super(tab, size, index, limit);
3352     this.map = map;
3353     this.est = est;
3354     }
3355    
3356     public ConcurrentHashMapSpliterator<Map.Entry<K,V>> trySplit() {
3357     int i, f, h;
3358     return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3359     new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
3360     f, est >>>= 1, map);
3361     }
3362    
3363     public void forEachRemaining(Action<? super Map.Entry<K,V>> action) {
3364     if (action == null) throw new NullPointerException();
3365     for (Node<K,V> p; (p = advance()) != null; )
3366     action.apply(new MapEntry<K,V>(p.key, p.val, map));
3367     }
3368    
3369     public boolean tryAdvance(Action<? super Map.Entry<K,V>> action) {
3370     if (action == null) throw new NullPointerException();
3371     Node<K,V> p;
3372     if ((p = advance()) == null)
3373     return false;
3374     action.apply(new MapEntry<K,V>(p.key, p.val, map));
3375     return true;
3376     }
3377    
3378     public long estimateSize() { return est; }
3379    
3380 dl 1.75 }
3381    
3382 dl 1.102 // Parallel bulk operations
3383    
3384 dl 1.75 /**
3385 dl 1.102 * Computes initial batch value for bulk tasks. The returned value
3386     * is approximately exp2 of the number of times (minus one) to
3387     * split task by two before executing leaf action. This value is
3388     * faster to compute and more convenient to use as a guide to
3389     * splitting than is the depth, since it is used while dividing by
3390     * two anyway.
3391     */
3392     final int batchFor(long b) {
3393     long n;
3394     if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3395     return 0;
3396     int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3397     return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3398 dl 1.75 }
3399    
3400     /**
3401 dl 1.84 * Performs the given action for each (key, value).
3402     *
3403 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3404     * needed for this operation to be executed in parallel
3405 dl 1.84 * @param action the action
3406 dl 1.102 * @since 1.8
3407 dl 1.75 */
3408 dl 1.102 public void forEach(long parallelismThreshold,
3409     BiAction<? super K,? super V> action) {
3410     if (action == null) throw new NullPointerException();
3411     new ForEachMappingTask<K,V>
3412     (null, batchFor(parallelismThreshold), 0, 0, table,
3413     action).invoke();
3414 dl 1.84 }
3415 dl 1.75
3416 dl 1.84 /**
3417     * Performs the given action for each non-null transformation
3418     * of each (key, value).
3419     *
3420 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3421     * needed for this operation to be executed in parallel
3422 dl 1.84 * @param transformer a function returning the transformation
3423 jsr166 1.91 * for an element, or null if there is no transformation (in
3424 jsr166 1.93 * which case the action is not applied)
3425 dl 1.84 * @param action the action
3426 dl 1.102 * @since 1.8
3427 dl 1.84 */
3428 dl 1.102 public <U> void forEach(long parallelismThreshold,
3429     BiFun<? super K, ? super V, ? extends U> transformer,
3430     Action<? super U> action) {
3431     if (transformer == null || action == null)
3432     throw new NullPointerException();
3433     new ForEachTransformedMappingTask<K,V,U>
3434     (null, batchFor(parallelismThreshold), 0, 0, table,
3435     transformer, action).invoke();
3436 dl 1.84 }
3437 dl 1.75
3438 dl 1.84 /**
3439     * Returns a non-null result from applying the given search
3440     * function on each (key, value), or null if none. Upon
3441     * success, further element processing is suppressed and the
3442     * results of any other parallel invocations of the search
3443     * function are ignored.
3444     *
3445 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3446     * needed for this operation to be executed in parallel
3447 dl 1.84 * @param searchFunction a function returning a non-null
3448     * result on success, else null
3449     * @return a non-null result from applying the given search
3450     * function on each (key, value), or null if none
3451 dl 1.102 * @since 1.8
3452 dl 1.84 */
3453 dl 1.102 public <U> U search(long parallelismThreshold,
3454     BiFun<? super K, ? super V, ? extends U> searchFunction) {
3455     if (searchFunction == null) throw new NullPointerException();
3456     return new SearchMappingsTask<K,V,U>
3457     (null, batchFor(parallelismThreshold), 0, 0, table,
3458     searchFunction, new AtomicReference<U>()).invoke();
3459 dl 1.84 }
3460 dl 1.75
3461 dl 1.84 /**
3462     * Returns the result of accumulating the given transformation
3463     * of all (key, value) pairs using the given reducer to
3464     * combine values, or null if none.
3465     *
3466 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3467     * needed for this operation to be executed in parallel
3468 dl 1.84 * @param transformer a function returning the transformation
3469 jsr166 1.91 * for an element, or null if there is no transformation (in
3470 jsr166 1.93 * which case it is not combined)
3471 dl 1.84 * @param reducer a commutative associative combining function
3472     * @return the result of accumulating the given transformation
3473     * of all (key, value) pairs
3474 dl 1.102 * @since 1.8
3475 dl 1.84 */
3476 dl 1.102 public <U> U reduce(long parallelismThreshold,
3477     BiFun<? super K, ? super V, ? extends U> transformer,
3478     BiFun<? super U, ? super U, ? extends U> reducer) {
3479     if (transformer == null || reducer == null)
3480     throw new NullPointerException();
3481     return new MapReduceMappingsTask<K,V,U>
3482     (null, batchFor(parallelismThreshold), 0, 0, table,
3483     null, transformer, reducer).invoke();
3484 dl 1.84 }
3485 dl 1.75
3486 dl 1.84 /**
3487     * Returns the result of accumulating the given transformation
3488     * of all (key, value) pairs using the given reducer to
3489     * combine values, and the given basis as an identity value.
3490     *
3491 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3492     * needed for this operation to be executed in parallel
3493 dl 1.84 * @param transformer a function returning the transformation
3494     * for an element
3495     * @param basis the identity (initial default value) for the reduction
3496     * @param reducer a commutative associative combining function
3497     * @return the result of accumulating the given transformation
3498     * of all (key, value) pairs
3499 dl 1.102 * @since 1.8
3500 dl 1.84 */
3501 dl 1.102 public double reduceToDoubleIn(long parallelismThreshold,
3502     ObjectByObjectToDouble<? super K, ? super V> transformer,
3503     double basis,
3504     DoubleByDoubleToDouble reducer) {
3505     if (transformer == null || reducer == null)
3506     throw new NullPointerException();
3507     return new MapReduceMappingsToDoubleTask<K,V>
3508     (null, batchFor(parallelismThreshold), 0, 0, table,
3509     null, transformer, basis, reducer).invoke();
3510 dl 1.84 }
3511    
3512     /**
3513     * Returns the result of accumulating the given transformation
3514     * of all (key, value) pairs using the given reducer to
3515     * combine values, and the given basis as an identity value.
3516     *
3517 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3518     * needed for this operation to be executed in parallel
3519 dl 1.84 * @param transformer a function returning the transformation
3520     * for an element
3521     * @param basis the identity (initial default value) for the reduction
3522     * @param reducer a commutative associative combining function
3523     * @return the result of accumulating the given transformation
3524     * of all (key, value) pairs
3525 dl 1.102 * @since 1.8
3526 dl 1.84 */
3527 dl 1.102 public long reduceToLong(long parallelismThreshold,
3528     ObjectByObjectToLong<? super K, ? super V> transformer,
3529     long basis,
3530     LongByLongToLong reducer) {
3531     if (transformer == null || reducer == null)
3532     throw new NullPointerException();
3533     return new MapReduceMappingsToLongTask<K,V>
3534     (null, batchFor(parallelismThreshold), 0, 0, table,
3535     null, transformer, basis, reducer).invoke();
3536 dl 1.84 }
3537    
3538     /**
3539     * Returns the result of accumulating the given transformation
3540     * of all (key, value) pairs using the given reducer to
3541     * combine values, and the given basis as an identity value.
3542     *
3543 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3544     * needed for this operation to be executed in parallel
3545 dl 1.84 * @param transformer a function returning the transformation
3546     * for an element
3547     * @param basis the identity (initial default value) for the reduction
3548     * @param reducer a commutative associative combining function
3549     * @return the result of accumulating the given transformation
3550     * of all (key, value) pairs
3551 dl 1.102 * @since 1.8
3552 dl 1.84 */
3553 dl 1.102 public int reduceToInt(long parallelismThreshold,
3554     ObjectByObjectToInt<? super K, ? super V> transformer,
3555     int basis,
3556     IntByIntToInt reducer) {
3557     if (transformer == null || reducer == null)
3558     throw new NullPointerException();
3559     return new MapReduceMappingsToIntTask<K,V>
3560     (null, batchFor(parallelismThreshold), 0, 0, table,
3561     null, transformer, basis, reducer).invoke();
3562 dl 1.84 }
3563    
3564     /**
3565     * Performs the given action for each key.
3566     *
3567 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3568     * needed for this operation to be executed in parallel
3569 dl 1.84 * @param action the action
3570 dl 1.102 * @since 1.8
3571 dl 1.84 */
3572 dl 1.102 public void forEachKey(long parallelismThreshold,
3573     Action<? super K> action) {
3574     if (action == null) throw new NullPointerException();
3575     new ForEachKeyTask<K,V>
3576     (null, batchFor(parallelismThreshold), 0, 0, table,
3577     action).invoke();
3578 dl 1.84 }
3579    
3580     /**
3581     * Performs the given action for each non-null transformation
3582     * of each key.
3583     *
3584 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3585     * needed for this operation to be executed in parallel
3586 dl 1.84 * @param transformer a function returning the transformation
3587 jsr166 1.91 * for an element, or null if there is no transformation (in
3588 jsr166 1.93 * which case the action is not applied)
3589 dl 1.84 * @param action the action
3590 dl 1.102 * @since 1.8
3591 dl 1.84 */
3592 dl 1.102 public <U> void forEachKey(long parallelismThreshold,
3593     Fun<? super K, ? extends U> transformer,
3594     Action<? super U> action) {
3595     if (transformer == null || action == null)
3596     throw new NullPointerException();
3597     new ForEachTransformedKeyTask<K,V,U>
3598     (null, batchFor(parallelismThreshold), 0, 0, table,
3599     transformer, action).invoke();
3600 dl 1.84 }
3601    
3602     /**
3603     * Returns a non-null result from applying the given search
3604     * function on each key, or null if none. Upon success,
3605     * further element processing is suppressed and the results of
3606     * any other parallel invocations of the search function are
3607     * ignored.
3608     *
3609 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3610     * needed for this operation to be executed in parallel
3611 dl 1.84 * @param searchFunction a function returning a non-null
3612     * result on success, else null
3613     * @return a non-null result from applying the given search
3614     * function on each key, or null if none
3615 dl 1.102 * @since 1.8
3616 dl 1.84 */
3617 dl 1.102 public <U> U searchKeys(long parallelismThreshold,
3618     Fun<? super K, ? extends U> searchFunction) {
3619     if (searchFunction == null) throw new NullPointerException();
3620     return new SearchKeysTask<K,V,U>
3621     (null, batchFor(parallelismThreshold), 0, 0, table,
3622     searchFunction, new AtomicReference<U>()).invoke();
3623 dl 1.84 }
3624    
3625     /**
3626     * Returns the result of accumulating all keys using the given
3627     * reducer to combine values, or null if none.
3628     *
3629 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3630     * needed for this operation to be executed in parallel
3631 dl 1.84 * @param reducer a commutative associative combining function
3632     * @return the result of accumulating all keys using the given
3633     * reducer to combine values, or null if none
3634 dl 1.102 * @since 1.8
3635 dl 1.84 */
3636 dl 1.102 public K reduceKeys(long parallelismThreshold,
3637     BiFun<? super K, ? super K, ? extends K> reducer) {
3638     if (reducer == null) throw new NullPointerException();
3639     return new ReduceKeysTask<K,V>
3640     (null, batchFor(parallelismThreshold), 0, 0, table,
3641     null, reducer).invoke();
3642 dl 1.84 }
3643    
3644     /**
3645     * Returns the result of accumulating the given transformation
3646     * of all keys using the given reducer to combine values, or
3647     * null if none.
3648     *
3649 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3650     * needed for this operation to be executed in parallel
3651 dl 1.84 * @param transformer a function returning the transformation
3652 jsr166 1.91 * for an element, or null if there is no transformation (in
3653 jsr166 1.93 * which case it is not combined)
3654 dl 1.84 * @param reducer a commutative associative combining function
3655     * @return the result of accumulating the given transformation
3656     * of all keys
3657 dl 1.102 * @since 1.8
3658 dl 1.84 */
3659 dl 1.102 public <U> U reduceKeys(long parallelismThreshold,
3660     Fun<? super K, ? extends U> transformer,
3661 dl 1.84 BiFun<? super U, ? super U, ? extends U> reducer) {
3662 dl 1.102 if (transformer == null || reducer == null)
3663     throw new NullPointerException();
3664     return new MapReduceKeysTask<K,V,U>
3665     (null, batchFor(parallelismThreshold), 0, 0, table,
3666     null, transformer, reducer).invoke();
3667 dl 1.84 }
3668    
3669     /**
3670     * Returns the result of accumulating the given transformation
3671     * of all keys using the given reducer to combine values, and
3672     * the given basis as an identity value.
3673     *
3674 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3675     * needed for this operation to be executed in parallel
3676 dl 1.84 * @param transformer a function returning the transformation
3677     * for an element
3678     * @param basis the identity (initial default value) for the reduction
3679     * @param reducer a commutative associative combining function
3680 dl 1.102 * @return the result of accumulating the given transformation
3681 dl 1.84 * of all keys
3682 dl 1.102 * @since 1.8
3683 dl 1.84 */
3684 dl 1.102 public double reduceKeysToDouble(long parallelismThreshold,
3685     ObjectToDouble<? super K> transformer,
3686     double basis,
3687     DoubleByDoubleToDouble reducer) {
3688     if (transformer == null || reducer == null)
3689     throw new NullPointerException();
3690     return new MapReduceKeysToDoubleTask<K,V>
3691     (null, batchFor(parallelismThreshold), 0, 0, table,
3692     null, transformer, basis, reducer).invoke();
3693 dl 1.84 }
3694    
3695     /**
3696     * Returns the result of accumulating the given transformation
3697     * of all keys using the given reducer to combine values, and
3698     * the given basis as an identity value.
3699     *
3700 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3701     * needed for this operation to be executed in parallel
3702 dl 1.84 * @param transformer a function returning the transformation
3703     * for an element
3704     * @param basis the identity (initial default value) for the reduction
3705     * @param reducer a commutative associative combining function
3706     * @return the result of accumulating the given transformation
3707     * of all keys
3708 dl 1.102 * @since 1.8
3709 dl 1.84 */
3710 dl 1.102 public long reduceKeysToLong(long parallelismThreshold,
3711     ObjectToLong<? super K> transformer,
3712     long basis,
3713     LongByLongToLong reducer) {
3714     if (transformer == null || reducer == null)
3715     throw new NullPointerException();
3716     return new MapReduceKeysToLongTask<K,V>
3717     (null, batchFor(parallelismThreshold), 0, 0, table,
3718     null, transformer, basis, reducer).invoke();
3719 dl 1.84 }
3720    
3721     /**
3722     * Returns the result of accumulating the given transformation
3723     * of all keys using the given reducer to combine values, and
3724     * the given basis as an identity value.
3725     *
3726 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3727     * needed for this operation to be executed in parallel
3728 dl 1.84 * @param transformer a function returning the transformation
3729     * for an element
3730     * @param basis the identity (initial default value) for the reduction
3731     * @param reducer a commutative associative combining function
3732     * @return the result of accumulating the given transformation
3733     * of all keys
3734 dl 1.102 * @since 1.8
3735 dl 1.84 */
3736 dl 1.102 public int reduceKeysToInt(long parallelismThreshold,
3737     ObjectToInt<? super K> transformer,
3738     int basis,
3739     IntByIntToInt reducer) {
3740     if (transformer == null || reducer == null)
3741     throw new NullPointerException();
3742     return new MapReduceKeysToIntTask<K,V>
3743     (null, batchFor(parallelismThreshold), 0, 0, table,
3744     null, transformer, basis, reducer).invoke();
3745 dl 1.84 }
3746    
3747     /**
3748     * Performs the given action for each value.
3749     *
3750 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3751     * needed for this operation to be executed in parallel
3752 dl 1.84 * @param action the action
3753 dl 1.102 * @since 1.8
3754 dl 1.84 */
3755 dl 1.102 public void forEachValue(long parallelismThreshold,
3756     Action<? super V> action) {
3757     if (action == null)
3758     throw new NullPointerException();
3759     new ForEachValueTask<K,V>
3760     (null, batchFor(parallelismThreshold), 0, 0, table,
3761     action).invoke();
3762 dl 1.84 }
3763    
3764     /**
3765     * Performs the given action for each non-null transformation
3766     * of each value.
3767     *
3768 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3769     * needed for this operation to be executed in parallel
3770 dl 1.84 * @param transformer a function returning the transformation
3771 jsr166 1.91 * for an element, or null if there is no transformation (in
3772 jsr166 1.93 * which case the action is not applied)
3773 dl 1.102 * @param action the action
3774     * @since 1.8
3775 dl 1.84 */
3776 dl 1.102 public <U> void forEachValue(long parallelismThreshold,
3777     Fun<? super V, ? extends U> transformer,
3778     Action<? super U> action) {
3779     if (transformer == null || action == null)
3780     throw new NullPointerException();
3781     new ForEachTransformedValueTask<K,V,U>
3782     (null, batchFor(parallelismThreshold), 0, 0, table,
3783     transformer, action).invoke();
3784 dl 1.84 }
3785    
3786     /**
3787     * Returns a non-null result from applying the given search
3788     * function on each value, or null if none. Upon success,
3789     * further element processing is suppressed and the results of
3790     * any other parallel invocations of the search function are
3791     * ignored.
3792     *
3793 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3794     * needed for this operation to be executed in parallel
3795 dl 1.84 * @param searchFunction a function returning a non-null
3796     * result on success, else null
3797     * @return a non-null result from applying the given search
3798     * function on each value, or null if none
3799 dl 1.102 * @since 1.8
3800 dl 1.84 */
3801 dl 1.102 public <U> U searchValues(long parallelismThreshold,
3802     Fun<? super V, ? extends U> searchFunction) {
3803     if (searchFunction == null) throw new NullPointerException();
3804     return new SearchValuesTask<K,V,U>
3805     (null, batchFor(parallelismThreshold), 0, 0, table,
3806     searchFunction, new AtomicReference<U>()).invoke();
3807 dl 1.84 }
3808    
3809     /**
3810     * Returns the result of accumulating all values using the
3811     * given reducer to combine values, or null if none.
3812     *
3813 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3814     * needed for this operation to be executed in parallel
3815 dl 1.84 * @param reducer a commutative associative combining function
3816 dl 1.102 * @return the result of accumulating all values
3817     * @since 1.8
3818 dl 1.84 */
3819 dl 1.102 public V reduceValues(long parallelismThreshold,
3820     BiFun<? super V, ? super V, ? extends V> reducer) {
3821     if (reducer == null) throw new NullPointerException();
3822     return new ReduceValuesTask<K,V>
3823     (null, batchFor(parallelismThreshold), 0, 0, table,
3824     null, reducer).invoke();
3825 dl 1.84 }
3826    
3827     /**
3828     * Returns the result of accumulating the given transformation
3829     * of all values using the given reducer to combine values, or
3830     * null if none.
3831     *
3832 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3833     * needed for this operation to be executed in parallel
3834 dl 1.84 * @param transformer a function returning the transformation
3835 jsr166 1.91 * for an element, or null if there is no transformation (in
3836 jsr166 1.93 * which case it is not combined)
3837 dl 1.84 * @param reducer a commutative associative combining function
3838     * @return the result of accumulating the given transformation
3839     * of all values
3840 dl 1.102 * @since 1.8
3841 dl 1.84 */
3842 dl 1.102 public <U> U reduceValues(long parallelismThreshold,
3843     Fun<? super V, ? extends U> transformer,
3844     BiFun<? super U, ? super U, ? extends U> reducer) {
3845     if (transformer == null || reducer == null)
3846     throw new NullPointerException();
3847     return new MapReduceValuesTask<K,V,U>
3848     (null, batchFor(parallelismThreshold), 0, 0, table,
3849     null, transformer, reducer).invoke();
3850 dl 1.84 }
3851    
3852     /**
3853     * Returns the result of accumulating the given transformation
3854     * of all values using the given reducer to combine values,
3855     * and the given basis as an identity value.
3856     *
3857 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3858     * needed for this operation to be executed in parallel
3859 dl 1.84 * @param transformer a function returning the transformation
3860     * for an element
3861     * @param basis the identity (initial default value) for the reduction
3862     * @param reducer a commutative associative combining function
3863     * @return the result of accumulating the given transformation
3864     * of all values
3865 dl 1.102 * @since 1.8
3866 dl 1.84 */
3867 dl 1.102 public double reduceValuesToDouble(long parallelismThreshold,
3868     ObjectToDouble<? super V> transformer,
3869     double basis,
3870     DoubleByDoubleToDouble reducer) {
3871     if (transformer == null || reducer == null)
3872     throw new NullPointerException();
3873     return new MapReduceValuesToDoubleTask<K,V>
3874     (null, batchFor(parallelismThreshold), 0, 0, table,
3875     null, transformer, basis, reducer).invoke();
3876 dl 1.84 }
3877    
3878     /**
3879     * Returns the result of accumulating the given transformation
3880     * of all values using the given reducer to combine values,
3881     * and the given basis as an identity value.
3882     *
3883 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3884     * needed for this operation to be executed in parallel
3885 dl 1.84 * @param transformer a function returning the transformation
3886     * for an element
3887     * @param basis the identity (initial default value) for the reduction
3888     * @param reducer a commutative associative combining function
3889     * @return the result of accumulating the given transformation
3890     * of all values
3891 dl 1.102 * @since 1.8
3892 dl 1.84 */
3893 dl 1.102 public long reduceValuesToLong(long parallelismThreshold,
3894     ObjectToLong<? super V> transformer,
3895     long basis,
3896     LongByLongToLong reducer) {
3897     if (transformer == null || reducer == null)
3898     throw new NullPointerException();
3899     return new MapReduceValuesToLongTask<K,V>
3900     (null, batchFor(parallelismThreshold), 0, 0, table,
3901     null, transformer, basis, reducer).invoke();
3902 dl 1.84 }
3903    
3904     /**
3905     * Returns the result of accumulating the given transformation
3906     * of all values using the given reducer to combine values,
3907     * and the given basis as an identity value.
3908     *
3909 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3910     * needed for this operation to be executed in parallel
3911 dl 1.84 * @param transformer a function returning the transformation
3912     * for an element
3913     * @param basis the identity (initial default value) for the reduction
3914     * @param reducer a commutative associative combining function
3915     * @return the result of accumulating the given transformation
3916     * of all values
3917 dl 1.102 * @since 1.8
3918 dl 1.84 */
3919 dl 1.102 public int reduceValuesToInt(long parallelismThreshold,
3920     ObjectToInt<? super V> transformer,
3921     int basis,
3922     IntByIntToInt reducer) {
3923     if (transformer == null || reducer == null)
3924     throw new NullPointerException();
3925     return new MapReduceValuesToIntTask<K,V>
3926     (null, batchFor(parallelismThreshold), 0, 0, table,
3927     null, transformer, basis, reducer).invoke();
3928 dl 1.84 }
3929    
3930     /**
3931     * Performs the given action for each entry.
3932     *
3933 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3934     * needed for this operation to be executed in parallel
3935 dl 1.84 * @param action the action
3936 dl 1.102 * @since 1.8
3937 dl 1.84 */
3938 dl 1.102 public void forEachEntry(long parallelismThreshold,
3939     Action<? super Map.Entry<K,V>> action) {
3940     if (action == null) throw new NullPointerException();
3941     new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
3942     action).invoke();
3943 dl 1.84 }
3944    
3945     /**
3946     * Performs the given action for each non-null transformation
3947     * of each entry.
3948     *
3949 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3950     * needed for this operation to be executed in parallel
3951 dl 1.84 * @param transformer a function returning the transformation
3952 jsr166 1.91 * for an element, or null if there is no transformation (in
3953 jsr166 1.93 * which case the action is not applied)
3954 dl 1.84 * @param action the action
3955 dl 1.102 * @since 1.8
3956 dl 1.84 */
3957 dl 1.102 public <U> void forEachEntry(long parallelismThreshold,
3958     Fun<Map.Entry<K,V>, ? extends U> transformer,
3959     Action<? super U> action) {
3960     if (transformer == null || action == null)
3961     throw new NullPointerException();
3962     new ForEachTransformedEntryTask<K,V,U>
3963     (null, batchFor(parallelismThreshold), 0, 0, table,
3964     transformer, action).invoke();
3965 dl 1.84 }
3966    
3967     /**
3968     * Returns a non-null result from applying the given search
3969     * function on each entry, or null if none. Upon success,
3970     * further element processing is suppressed and the results of
3971     * any other parallel invocations of the search function are
3972     * ignored.
3973     *
3974 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3975     * needed for this operation to be executed in parallel
3976 dl 1.84 * @param searchFunction a function returning a non-null
3977     * result on success, else null
3978     * @return a non-null result from applying the given search
3979     * function on each entry, or null if none
3980 dl 1.102 * @since 1.8
3981 dl 1.84 */
3982 dl 1.102 public <U> U searchEntries(long parallelismThreshold,
3983     Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
3984     if (searchFunction == null) throw new NullPointerException();
3985     return new SearchEntriesTask<K,V,U>
3986     (null, batchFor(parallelismThreshold), 0, 0, table,
3987     searchFunction, new AtomicReference<U>()).invoke();
3988 dl 1.84 }
3989    
3990     /**
3991     * Returns the result of accumulating all entries using the
3992     * given reducer to combine values, or null if none.
3993     *
3994 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
3995     * needed for this operation to be executed in parallel
3996 dl 1.84 * @param reducer a commutative associative combining function
3997     * @return the result of accumulating all entries
3998 dl 1.102 * @since 1.8
3999 dl 1.84 */
4000 dl 1.102 public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4001     BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4002     if (reducer == null) throw new NullPointerException();
4003     return new ReduceEntriesTask<K,V>
4004     (null, batchFor(parallelismThreshold), 0, 0, table,
4005     null, reducer).invoke();
4006 dl 1.84 }
4007    
4008     /**
4009     * Returns the result of accumulating the given transformation
4010     * of all entries using the given reducer to combine values,
4011     * or null if none.
4012     *
4013 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
4014     * needed for this operation to be executed in parallel
4015 dl 1.84 * @param transformer a function returning the transformation
4016 jsr166 1.91 * for an element, or null if there is no transformation (in
4017 jsr166 1.93 * which case it is not combined)
4018 dl 1.84 * @param reducer a commutative associative combining function
4019     * @return the result of accumulating the given transformation
4020     * of all entries
4021 dl 1.102 * @since 1.8
4022 dl 1.84 */
4023 dl 1.102 public <U> U reduceEntries(long parallelismThreshold,
4024     Fun<Map.Entry<K,V>, ? extends U> transformer,
4025     BiFun<? super U, ? super U, ? extends U> reducer) {
4026     if (transformer == null || reducer == null)
4027     throw new NullPointerException();
4028     return new MapReduceEntriesTask<K,V,U>
4029     (null, batchFor(parallelismThreshold), 0, 0, table,
4030     null, transformer, reducer).invoke();
4031 dl 1.84 }
4032    
4033     /**
4034     * Returns the result of accumulating the given transformation
4035     * of all entries using the given reducer to combine values,
4036     * and the given basis as an identity value.
4037     *
4038 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
4039     * needed for this operation to be executed in parallel
4040 dl 1.84 * @param transformer a function returning the transformation
4041     * for an element
4042     * @param basis the identity (initial default value) for the reduction
4043     * @param reducer a commutative associative combining function
4044     * @return the result of accumulating the given transformation
4045     * of all entries
4046 dl 1.102 * @since 1.8
4047 dl 1.84 */
4048 dl 1.102 public double reduceEntriesToDouble(long parallelismThreshold,
4049     ObjectToDouble<Map.Entry<K,V>> transformer,
4050     double basis,
4051     DoubleByDoubleToDouble reducer) {
4052     if (transformer == null || reducer == null)
4053     throw new NullPointerException();
4054     return new MapReduceEntriesToDoubleTask<K,V>
4055     (null, batchFor(parallelismThreshold), 0, 0, table,
4056     null, transformer, basis, reducer).invoke();
4057 dl 1.84 }
4058    
4059     /**
4060     * Returns the result of accumulating the given transformation
4061     * of all entries using the given reducer to combine values,
4062     * and the given basis as an identity value.
4063     *
4064 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
4065     * needed for this operation to be executed in parallel
4066 dl 1.84 * @param transformer a function returning the transformation
4067     * for an element
4068     * @param basis the identity (initial default value) for the reduction
4069     * @param reducer a commutative associative combining function
4070 dl 1.102 * @return the result of accumulating the given transformation
4071 dl 1.84 * of all entries
4072 dl 1.102 * @since 1.8
4073 dl 1.84 */
4074 dl 1.102 public long reduceEntriesToLong(long parallelismThreshold,
4075     ObjectToLong<Map.Entry<K,V>> transformer,
4076     long basis,
4077     LongByLongToLong reducer) {
4078     if (transformer == null || reducer == null)
4079     throw new NullPointerException();
4080     return new MapReduceEntriesToLongTask<K,V>
4081     (null, batchFor(parallelismThreshold), 0, 0, table,
4082     null, transformer, basis, reducer).invoke();
4083 dl 1.84 }
4084    
4085     /**
4086     * Returns the result of accumulating the given transformation
4087     * of all entries using the given reducer to combine values,
4088     * and the given basis as an identity value.
4089     *
4090 dl 1.102 * @param parallelismThreshold the (estimated) number of elements
4091     * needed for this operation to be executed in parallel
4092 dl 1.84 * @param transformer a function returning the transformation
4093     * for an element
4094     * @param basis the identity (initial default value) for the reduction
4095     * @param reducer a commutative associative combining function
4096     * @return the result of accumulating the given transformation
4097     * of all entries
4098 dl 1.102 * @since 1.8
4099 dl 1.84 */
4100 dl 1.102 public int reduceEntriesToInt(long parallelismThreshold,
4101     ObjectToInt<Map.Entry<K,V>> transformer,
4102     int basis,
4103     IntByIntToInt reducer) {
4104     if (transformer == null || reducer == null)
4105     throw new NullPointerException();
4106     return new MapReduceEntriesToIntTask<K,V>
4107     (null, batchFor(parallelismThreshold), 0, 0, table,
4108     null, transformer, basis, reducer).invoke();
4109 dl 1.84 }
4110    
4111    
4112     /* ----------------Views -------------- */
4113    
4114     /**
4115     * Base class for views.
4116     */
4117 dl 1.102 abstract static class CollectionView<K,V,E>
4118     implements Collection<E>, java.io.Serializable {
4119     private static final long serialVersionUID = 7249069246763182397L;
4120 jsr166 1.99 final ConcurrentHashMapV8<K,V> map;
4121 dl 1.102 CollectionView(ConcurrentHashMapV8<K,V> map) { this.map = map; }
4122 dl 1.84
4123     /**
4124     * Returns the map backing this view.
4125     *
4126     * @return the map backing this view
4127     */
4128     public ConcurrentHashMapV8<K,V> getMap() { return map; }
4129    
4130 dl 1.102 /**
4131     * Removes all of the elements from this view, by removing all
4132     * the mappings from the map backing this view.
4133     */
4134     public final void clear() { map.clear(); }
4135     public final int size() { return map.size(); }
4136     public final boolean isEmpty() { return map.isEmpty(); }
4137 dl 1.84
4138     // implementations below rely on concrete classes supplying these
4139 dl 1.102 // abstract methods
4140     /**
4141     * Returns a "weakly consistent" iterator that will never
4142     * throw {@link ConcurrentModificationException}, and
4143     * guarantees to traverse elements as they existed upon
4144     * construction of the iterator, and may (but is not
4145     * guaranteed to) reflect any modifications subsequent to
4146     * construction.
4147     */
4148     public abstract Iterator<E> iterator();
4149 jsr166 1.88 public abstract boolean contains(Object o);
4150     public abstract boolean remove(Object o);
4151 dl 1.84
4152     private static final String oomeMsg = "Required array size too large";
4153 dl 1.75
4154     public final Object[] toArray() {
4155     long sz = map.mappingCount();
4156 dl 1.102 if (sz > MAX_ARRAY_SIZE)
4157 dl 1.75 throw new OutOfMemoryError(oomeMsg);
4158     int n = (int)sz;
4159     Object[] r = new Object[n];
4160     int i = 0;
4161 dl 1.102 for (E e : this) {
4162 dl 1.75 if (i == n) {
4163     if (n >= MAX_ARRAY_SIZE)
4164     throw new OutOfMemoryError(oomeMsg);
4165     if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4166     n = MAX_ARRAY_SIZE;
4167     else
4168     n += (n >>> 1) + 1;
4169     r = Arrays.copyOf(r, n);
4170     }
4171 dl 1.102 r[i++] = e;
4172 dl 1.75 }
4173     return (i == n) ? r : Arrays.copyOf(r, i);
4174     }
4175    
4176 dl 1.102 @SuppressWarnings("unchecked")
4177     public final <T> T[] toArray(T[] a) {
4178 dl 1.75 long sz = map.mappingCount();
4179 dl 1.102 if (sz > MAX_ARRAY_SIZE)
4180 dl 1.75 throw new OutOfMemoryError(oomeMsg);
4181     int m = (int)sz;
4182     T[] r = (a.length >= m) ? a :
4183     (T[])java.lang.reflect.Array
4184     .newInstance(a.getClass().getComponentType(), m);
4185     int n = r.length;
4186     int i = 0;
4187 dl 1.102 for (E e : this) {
4188 dl 1.75 if (i == n) {
4189     if (n >= MAX_ARRAY_SIZE)
4190     throw new OutOfMemoryError(oomeMsg);
4191     if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4192     n = MAX_ARRAY_SIZE;
4193     else
4194     n += (n >>> 1) + 1;
4195     r = Arrays.copyOf(r, n);
4196     }
4197 dl 1.102 r[i++] = (T)e;
4198 dl 1.75 }
4199     if (a == r && i < n) {
4200     r[i] = null; // null-terminate
4201     return r;
4202     }
4203     return (i == n) ? r : Arrays.copyOf(r, i);
4204     }
4205    
4206 dl 1.102 /**
4207     * Returns a string representation of this collection.
4208     * The string representation consists of the string representations
4209     * of the collection's elements in the order they are returned by
4210     * its iterator, enclosed in square brackets ({@code "[]"}).
4211     * Adjacent elements are separated by the characters {@code ", "}
4212     * (comma and space). Elements are converted to strings as by
4213     * {@link String#valueOf(Object)}.
4214     *
4215     * @return a string representation of this collection
4216     */
4217 dl 1.75 public final String toString() {
4218     StringBuilder sb = new StringBuilder();
4219     sb.append('[');
4220 dl 1.102 Iterator<E> it = iterator();
4221 dl 1.75 if (it.hasNext()) {
4222     for (;;) {
4223     Object e = it.next();
4224     sb.append(e == this ? "(this Collection)" : e);
4225     if (!it.hasNext())
4226     break;
4227     sb.append(',').append(' ');
4228     }
4229     }
4230     return sb.append(']').toString();
4231     }
4232    
4233     public final boolean containsAll(Collection<?> c) {
4234     if (c != this) {
4235 dl 1.102 for (Object e : c) {
4236 dl 1.75 if (e == null || !contains(e))
4237     return false;
4238     }
4239     }
4240     return true;
4241     }
4242    
4243     public final boolean removeAll(Collection<?> c) {
4244     boolean modified = false;
4245 dl 1.102 for (Iterator<E> it = iterator(); it.hasNext();) {
4246 dl 1.75 if (c.contains(it.next())) {
4247     it.remove();
4248     modified = true;
4249     }
4250     }
4251     return modified;
4252     }
4253    
4254     public final boolean retainAll(Collection<?> c) {
4255     boolean modified = false;
4256 dl 1.102 for (Iterator<E> it = iterator(); it.hasNext();) {
4257 dl 1.75 if (!c.contains(it.next())) {
4258     it.remove();
4259     modified = true;
4260     }
4261     }
4262     return modified;
4263     }
4264    
4265     }
4266    
4267     /**
4268     * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in
4269     * which additions may optionally be enabled by mapping to a
4270 dl 1.102 * common value. This class cannot be directly instantiated.
4271     * See {@link #keySet() keySet()},
4272     * {@link #keySet(Object) keySet(V)},
4273     * {@link #newKeySet() newKeySet()},
4274     * {@link #newKeySet(int) newKeySet(int)}.
4275     *
4276     * @since 1.8
4277 dl 1.75 */
4278 dl 1.102 public static class KeySetView<K,V> extends CollectionView<K,V,K>
4279 dl 1.82 implements Set<K>, java.io.Serializable {
4280 dl 1.75 private static final long serialVersionUID = 7249069246763182397L;
4281     private final V value;
4282 jsr166 1.99 KeySetView(ConcurrentHashMapV8<K,V> map, V value) { // non-public
4283 dl 1.75 super(map);
4284     this.value = value;
4285     }
4286    
4287     /**
4288     * Returns the default mapped value for additions,
4289     * or {@code null} if additions are not supported.
4290     *
4291     * @return the default mapped value for additions, or {@code null}
4292 jsr166 1.93 * if not supported
4293 dl 1.75 */
4294     public V getMappedValue() { return value; }
4295    
4296 dl 1.102 /**
4297     * {@inheritDoc}
4298     * @throws NullPointerException if the specified key is null
4299     */
4300     public boolean contains(Object o) { return map.containsKey(o); }
4301    
4302     /**
4303     * Removes the key from this map view, by removing the key (and its
4304     * corresponding value) from the backing map. This method does
4305     * nothing if the key is not in the map.
4306     *
4307     * @param o the key to be removed from the backing map
4308     * @return {@code true} if the backing map contained the specified key
4309     * @throws NullPointerException if the specified key is null
4310     */
4311     public boolean remove(Object o) { return map.remove(o) != null; }
4312 dl 1.75
4313 dl 1.102 /**
4314     * @return an iterator over the keys of the backing map
4315     */
4316     public Iterator<K> iterator() {
4317     Node<K,V>[] t;
4318     ConcurrentHashMapV8<K,V> m = map;
4319     int f = (t = m.table) == null ? 0 : t.length;
4320     return new KeyIterator<K,V>(t, f, 0, f, m);
4321     }
4322 dl 1.75
4323     /**
4324 dl 1.102 * Adds the specified key to this set view by mapping the key to
4325     * the default mapped value in the backing map, if defined.
4326 dl 1.75 *
4327 dl 1.102 * @param e key to be added
4328     * @return {@code true} if this set changed as a result of the call
4329     * @throws NullPointerException if the specified key is null
4330     * @throws UnsupportedOperationException if no default mapped value
4331     * for additions was provided
4332 dl 1.75 */
4333     public boolean add(K e) {
4334     V v;
4335     if ((v = value) == null)
4336     throw new UnsupportedOperationException();
4337 dl 1.102 return map.putVal(e, v, true) == null;
4338 dl 1.75 }
4339 dl 1.102
4340     /**
4341     * Adds all of the elements in the specified collection to this set,
4342     * as if by calling {@link #add} on each one.
4343     *
4344     * @param c the elements to be inserted into this set
4345     * @return {@code true} if this set changed as a result of the call
4346     * @throws NullPointerException if the collection or any of its
4347     * elements are {@code null}
4348     * @throws UnsupportedOperationException if no default mapped value
4349     * for additions was provided
4350     */
4351 dl 1.75 public boolean addAll(Collection<? extends K> c) {
4352     boolean added = false;
4353     V v;
4354     if ((v = value) == null)
4355     throw new UnsupportedOperationException();
4356     for (K e : c) {
4357 dl 1.102 if (map.putVal(e, v, true) == null)
4358 dl 1.75 added = true;
4359     }
4360     return added;
4361     }
4362 dl 1.102
4363     public int hashCode() {
4364     int h = 0;
4365     for (K e : this)
4366     h += e.hashCode();
4367     return h;
4368     }
4369    
4370 dl 1.75 public boolean equals(Object o) {
4371     Set<?> c;
4372     return ((o instanceof Set) &&
4373     ((c = (Set<?>)o) == this ||
4374     (containsAll(c) && c.containsAll(this))));
4375     }
4376 dl 1.102
4377     public ConcurrentHashMapSpliterator<K> spliterator() {
4378     Node<K,V>[] t;
4379     ConcurrentHashMapV8<K,V> m = map;
4380     long n = m.sumCount();
4381     int f = (t = m.table) == null ? 0 : t.length;
4382     return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4383     }
4384    
4385     public void forEach(Action<? super K> action) {
4386     if (action == null) throw new NullPointerException();
4387     Node<K,V>[] t;
4388     if ((t = map.table) != null) {
4389     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4390     for (Node<K,V> p; (p = it.advance()) != null; )
4391     action.apply(p.key);
4392     }
4393     }
4394 dl 1.75 }
4395    
4396     /**
4397     * A view of a ConcurrentHashMapV8 as a {@link Collection} of
4398     * values, in which additions are disabled. This class cannot be
4399 jsr166 1.96 * directly instantiated. See {@link #values()}.
4400 dl 1.75 */
4401 dl 1.102 static final class ValuesView<K,V> extends CollectionView<K,V,V>
4402     implements Collection<V>, java.io.Serializable {
4403     private static final long serialVersionUID = 2249069246763182397L;
4404     ValuesView(ConcurrentHashMapV8<K,V> map) { super(map); }
4405     public final boolean contains(Object o) {
4406     return map.containsValue(o);
4407     }
4408    
4409 dl 1.75 public final boolean remove(Object o) {
4410     if (o != null) {
4411 dl 1.102 for (Iterator<V> it = iterator(); it.hasNext();) {
4412 dl 1.75 if (o.equals(it.next())) {
4413     it.remove();
4414     return true;
4415     }
4416     }
4417     }
4418     return false;
4419     }
4420    
4421     public final Iterator<V> iterator() {
4422 dl 1.102 ConcurrentHashMapV8<K,V> m = map;
4423     Node<K,V>[] t;
4424     int f = (t = m.table) == null ? 0 : t.length;
4425     return new ValueIterator<K,V>(t, f, 0, f, m);
4426 dl 1.75 }
4427 dl 1.102
4428 dl 1.75 public final boolean add(V e) {
4429     throw new UnsupportedOperationException();
4430     }
4431     public final boolean addAll(Collection<? extends V> c) {
4432     throw new UnsupportedOperationException();
4433     }
4434    
4435 dl 1.102 public ConcurrentHashMapSpliterator<V> spliterator() {
4436     Node<K,V>[] t;
4437     ConcurrentHashMapV8<K,V> m = map;
4438     long n = m.sumCount();
4439     int f = (t = m.table) == null ? 0 : t.length;
4440     return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4441     }
4442    
4443     public void forEach(Action<? super V> action) {
4444     if (action == null) throw new NullPointerException();
4445     Node<K,V>[] t;
4446     if ((t = map.table) != null) {
4447     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4448     for (Node<K,V> p; (p = it.advance()) != null; )
4449     action.apply(p.val);
4450     }
4451     }
4452 dl 1.70 }
4453 dl 1.52
4454 dl 1.70 /**
4455 dl 1.75 * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value)
4456     * entries. This class cannot be directly instantiated. See
4457 jsr166 1.95 * {@link #entrySet()}.
4458 dl 1.70 */
4459 dl 1.102 static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
4460     implements Set<Map.Entry<K,V>>, java.io.Serializable {
4461     private static final long serialVersionUID = 2249069246763182397L;
4462 jsr166 1.99 EntrySetView(ConcurrentHashMapV8<K,V> map) { super(map); }
4463 dl 1.102
4464     public boolean contains(Object o) {
4465 dl 1.75 Object k, v, r; Map.Entry<?,?> e;
4466     return ((o instanceof Map.Entry) &&
4467     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4468     (r = map.get(k)) != null &&
4469     (v = e.getValue()) != null &&
4470     (v == r || v.equals(r)));
4471     }
4472 dl 1.102
4473     public boolean remove(Object o) {
4474 dl 1.75 Object k, v; Map.Entry<?,?> e;
4475     return ((o instanceof Map.Entry) &&
4476     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4477     (v = e.getValue()) != null &&
4478     map.remove(k, v));
4479     }
4480    
4481     /**
4482 dl 1.102 * @return an iterator over the entries of the backing map
4483 dl 1.75 */
4484 dl 1.102 public Iterator<Map.Entry<K,V>> iterator() {
4485     ConcurrentHashMapV8<K,V> m = map;
4486     Node<K,V>[] t;
4487     int f = (t = m.table) == null ? 0 : t.length;
4488     return new EntryIterator<K,V>(t, f, 0, f, m);
4489 dl 1.75 }
4490    
4491 dl 1.102 public boolean add(Entry<K,V> e) {
4492     return map.putVal(e.getKey(), e.getValue(), false) == null;
4493 dl 1.75 }
4494 dl 1.102
4495     public boolean addAll(Collection<? extends Entry<K,V>> c) {
4496 dl 1.75 boolean added = false;
4497     for (Entry<K,V> e : c) {
4498     if (add(e))
4499     added = true;
4500     }
4501     return added;
4502     }
4503 dl 1.52
4504 dl 1.102 public final int hashCode() {
4505     int h = 0;
4506     Node<K,V>[] t;
4507     if ((t = map.table) != null) {
4508     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4509     for (Node<K,V> p; (p = it.advance()) != null; ) {
4510     h += p.hashCode();
4511     }
4512     }
4513     return h;
4514 dl 1.52 }
4515    
4516 dl 1.102 public final boolean equals(Object o) {
4517     Set<?> c;
4518     return ((o instanceof Set) &&
4519     ((c = (Set<?>)o) == this ||
4520     (containsAll(c) && c.containsAll(this))));
4521 dl 1.52 }
4522    
4523 dl 1.102 public ConcurrentHashMapSpliterator<Map.Entry<K,V>> spliterator() {
4524     Node<K,V>[] t;
4525     ConcurrentHashMapV8<K,V> m = map;
4526     long n = m.sumCount();
4527     int f = (t = m.table) == null ? 0 : t.length;
4528     return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
4529 dl 1.52 }
4530    
4531 dl 1.102 public void forEach(Action<? super Map.Entry<K,V>> action) {
4532     if (action == null) throw new NullPointerException();
4533     Node<K,V>[] t;
4534     if ((t = map.table) != null) {
4535     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4536     for (Node<K,V> p; (p = it.advance()) != null; )
4537     action.apply(new MapEntry<K,V>(p.key, p.val, map));
4538     }
4539 dl 1.52 }
4540    
4541 dl 1.102 }
4542 dl 1.52
4543 dl 1.102 // -------------------------------------------------------
4544 dl 1.52
4545 dl 1.102 /**
4546     * Base class for bulk tasks. Repeats some fields and code from
4547     * class Traverser, because we need to subclass CountedCompleter.
4548     */
4549     abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
4550     Node<K,V>[] tab; // same as Traverser
4551     Node<K,V> next;
4552     int index;
4553     int baseIndex;
4554     int baseLimit;
4555     final int baseSize;
4556     int batch; // split control
4557    
4558     BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
4559     super(par);
4560     this.batch = b;
4561     this.index = this.baseIndex = i;
4562     if ((this.tab = t) == null)
4563     this.baseSize = this.baseLimit = 0;
4564     else if (par == null)
4565     this.baseSize = this.baseLimit = t.length;
4566     else {
4567     this.baseLimit = f;
4568     this.baseSize = par.baseSize;
4569     }
4570 dl 1.52 }
4571    
4572     /**
4573 dl 1.102 * Same as Traverser version
4574 dl 1.52 */
4575 dl 1.102 final Node<K,V> advance() {
4576     Node<K,V> e;
4577     if ((e = next) != null)
4578     e = e.next;
4579     for (;;) {
4580     Node<K,V>[] t; int i, n; K ek; // must use locals in checks
4581     if (e != null)
4582     return next = e;
4583     if (baseIndex >= baseLimit || (t = tab) == null ||
4584     (n = t.length) <= (i = index) || i < 0)
4585     return next = null;
4586     if ((e = tabAt(t, index)) != null && e.hash < 0) {
4587     if (e instanceof ForwardingNode) {
4588     tab = ((ForwardingNode<K,V>)e).nextTable;
4589     e = null;
4590     continue;
4591     }
4592     else if (e instanceof TreeBin)
4593     e = ((TreeBin<K,V>)e).first;
4594     else
4595     e = null;
4596     }
4597     if ((index += baseSize) >= n)
4598     index = ++baseIndex; // visit upper slots if present
4599     }
4600 dl 1.52 }
4601     }
4602    
4603     /*
4604     * Task classes. Coded in a regular but ugly format/style to
4605     * simplify checks that each variant differs in the right way from
4606 dl 1.82 * others. The null screenings exist because compilers cannot tell
4607     * that we've already null-checked task arguments, so we force
4608     * simplest hoisted bypass to help avoid convoluted traps.
4609 dl 1.52 */
4610 dl 1.102 @SuppressWarnings("serial")
4611     static final class ForEachKeyTask<K,V>
4612     extends BulkTask<K,V,Void> {
4613     final Action<? super K> action;
4614 dl 1.52 ForEachKeyTask
4615 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4616     Action<? super K> action) {
4617     super(p, b, i, f, t);
4618 dl 1.52 this.action = action;
4619     }
4620 dl 1.102 public final void compute() {
4621     final Action<? super K> action;
4622 dl 1.82 if ((action = this.action) != null) {
4623 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4624     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4625     addToPendingCount(1);
4626     new ForEachKeyTask<K,V>
4627     (this, batch >>>= 1, baseLimit = h, f, tab,
4628     action).fork();
4629     }
4630     for (Node<K,V> p; (p = advance()) != null;)
4631     action.apply(p.key);
4632 dl 1.82 propagateCompletion();
4633     }
4634 dl 1.52 }
4635     }
4636    
4637 dl 1.102 @SuppressWarnings("serial")
4638     static final class ForEachValueTask<K,V>
4639     extends BulkTask<K,V,Void> {
4640     final Action<? super V> action;
4641 dl 1.52 ForEachValueTask
4642 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4643     Action<? super V> action) {
4644     super(p, b, i, f, t);
4645 dl 1.52 this.action = action;
4646     }
4647 dl 1.102 public final void compute() {
4648     final Action<? super V> action;
4649 dl 1.82 if ((action = this.action) != null) {
4650 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4651     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4652     addToPendingCount(1);
4653     new ForEachValueTask<K,V>
4654     (this, batch >>>= 1, baseLimit = h, f, tab,
4655     action).fork();
4656     }
4657     for (Node<K,V> p; (p = advance()) != null;)
4658     action.apply(p.val);
4659 dl 1.82 propagateCompletion();
4660     }
4661 dl 1.52 }
4662     }
4663    
4664 dl 1.102 @SuppressWarnings("serial")
4665     static final class ForEachEntryTask<K,V>
4666     extends BulkTask<K,V,Void> {
4667     final Action<? super Entry<K,V>> action;
4668 dl 1.52 ForEachEntryTask
4669 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4670     Action<? super Entry<K,V>> action) {
4671     super(p, b, i, f, t);
4672 dl 1.52 this.action = action;
4673     }
4674 dl 1.102 public final void compute() {
4675     final Action<? super Entry<K,V>> action;
4676 dl 1.82 if ((action = this.action) != null) {
4677 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4678     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4679     addToPendingCount(1);
4680     new ForEachEntryTask<K,V>
4681     (this, batch >>>= 1, baseLimit = h, f, tab,
4682     action).fork();
4683     }
4684     for (Node<K,V> p; (p = advance()) != null; )
4685     action.apply(p);
4686 dl 1.82 propagateCompletion();
4687     }
4688 dl 1.52 }
4689     }
4690    
4691 dl 1.102 @SuppressWarnings("serial")
4692     static final class ForEachMappingTask<K,V>
4693     extends BulkTask<K,V,Void> {
4694     final BiAction<? super K, ? super V> action;
4695 dl 1.52 ForEachMappingTask
4696 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4697     BiAction<? super K,? super V> action) {
4698     super(p, b, i, f, t);
4699 dl 1.52 this.action = action;
4700     }
4701 dl 1.102 public final void compute() {
4702     final BiAction<? super K, ? super V> action;
4703 dl 1.82 if ((action = this.action) != null) {
4704 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4705     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4706     addToPendingCount(1);
4707     new ForEachMappingTask<K,V>
4708     (this, batch >>>= 1, baseLimit = h, f, tab,
4709     action).fork();
4710     }
4711     for (Node<K,V> p; (p = advance()) != null; )
4712     action.apply(p.key, p.val);
4713 dl 1.82 propagateCompletion();
4714     }
4715 dl 1.52 }
4716     }
4717    
4718 dl 1.102 @SuppressWarnings("serial")
4719     static final class ForEachTransformedKeyTask<K,V,U>
4720     extends BulkTask<K,V,Void> {
4721 dl 1.52 final Fun<? super K, ? extends U> transformer;
4722 dl 1.102 final Action<? super U> action;
4723 dl 1.52 ForEachTransformedKeyTask
4724 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4725     Fun<? super K, ? extends U> transformer, Action<? super U> action) {
4726     super(p, b, i, f, t);
4727 dl 1.79 this.transformer = transformer; this.action = action;
4728     }
4729 dl 1.102 public final void compute() {
4730 dl 1.79 final Fun<? super K, ? extends U> transformer;
4731 dl 1.102 final Action<? super U> action;
4732 dl 1.82 if ((transformer = this.transformer) != null &&
4733     (action = this.action) != null) {
4734 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4735     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4736     addToPendingCount(1);
4737 dl 1.82 new ForEachTransformedKeyTask<K,V,U>
4738 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
4739     transformer, action).fork();
4740     }
4741     for (Node<K,V> p; (p = advance()) != null; ) {
4742     U u;
4743     if ((u = transformer.apply(p.key)) != null)
4744 dl 1.82 action.apply(u);
4745     }
4746     propagateCompletion();
4747 dl 1.52 }
4748     }
4749     }
4750    
4751 dl 1.102 @SuppressWarnings("serial")
4752     static final class ForEachTransformedValueTask<K,V,U>
4753     extends BulkTask<K,V,Void> {
4754 dl 1.52 final Fun<? super V, ? extends U> transformer;
4755 dl 1.102 final Action<? super U> action;
4756 dl 1.52 ForEachTransformedValueTask
4757 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4758     Fun<? super V, ? extends U> transformer, Action<? super U> action) {
4759     super(p, b, i, f, t);
4760 dl 1.79 this.transformer = transformer; this.action = action;
4761     }
4762 dl 1.102 public final void compute() {
4763 dl 1.79 final Fun<? super V, ? extends U> transformer;
4764 dl 1.102 final Action<? super U> action;
4765 dl 1.82 if ((transformer = this.transformer) != null &&
4766     (action = this.action) != null) {
4767 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4768     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4769     addToPendingCount(1);
4770 dl 1.82 new ForEachTransformedValueTask<K,V,U>
4771 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
4772     transformer, action).fork();
4773     }
4774     for (Node<K,V> p; (p = advance()) != null; ) {
4775     U u;
4776     if ((u = transformer.apply(p.val)) != null)
4777 dl 1.82 action.apply(u);
4778     }
4779     propagateCompletion();
4780 dl 1.52 }
4781     }
4782     }
4783    
4784 dl 1.102 @SuppressWarnings("serial")
4785     static final class ForEachTransformedEntryTask<K,V,U>
4786     extends BulkTask<K,V,Void> {
4787 dl 1.52 final Fun<Map.Entry<K,V>, ? extends U> transformer;
4788 dl 1.102 final Action<? super U> action;
4789 dl 1.52 ForEachTransformedEntryTask
4790 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4791     Fun<Map.Entry<K,V>, ? extends U> transformer, Action<? super U> action) {
4792     super(p, b, i, f, t);
4793 dl 1.79 this.transformer = transformer; this.action = action;
4794     }
4795 dl 1.102 public final void compute() {
4796 dl 1.79 final Fun<Map.Entry<K,V>, ? extends U> transformer;
4797 dl 1.102 final Action<? super U> action;
4798 dl 1.82 if ((transformer = this.transformer) != null &&
4799     (action = this.action) != null) {
4800 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4801     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4802     addToPendingCount(1);
4803 dl 1.82 new ForEachTransformedEntryTask<K,V,U>
4804 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
4805     transformer, action).fork();
4806     }
4807     for (Node<K,V> p; (p = advance()) != null; ) {
4808     U u;
4809     if ((u = transformer.apply(p)) != null)
4810 dl 1.82 action.apply(u);
4811     }
4812     propagateCompletion();
4813 dl 1.52 }
4814     }
4815     }
4816    
4817 dl 1.102 @SuppressWarnings("serial")
4818     static final class ForEachTransformedMappingTask<K,V,U>
4819     extends BulkTask<K,V,Void> {
4820 dl 1.52 final BiFun<? super K, ? super V, ? extends U> transformer;
4821 dl 1.102 final Action<? super U> action;
4822 dl 1.52 ForEachTransformedMappingTask
4823 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4824 dl 1.52 BiFun<? super K, ? super V, ? extends U> transformer,
4825 dl 1.102 Action<? super U> action) {
4826     super(p, b, i, f, t);
4827 dl 1.79 this.transformer = transformer; this.action = action;
4828 dl 1.52 }
4829 dl 1.102 public final void compute() {
4830 dl 1.79 final BiFun<? super K, ? super V, ? extends U> transformer;
4831 dl 1.102 final Action<? super U> action;
4832 dl 1.82 if ((transformer = this.transformer) != null &&
4833     (action = this.action) != null) {
4834 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4835     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4836     addToPendingCount(1);
4837 dl 1.82 new ForEachTransformedMappingTask<K,V,U>
4838 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
4839     transformer, action).fork();
4840     }
4841     for (Node<K,V> p; (p = advance()) != null; ) {
4842     U u;
4843     if ((u = transformer.apply(p.key, p.val)) != null)
4844 dl 1.82 action.apply(u);
4845     }
4846     propagateCompletion();
4847 dl 1.52 }
4848     }
4849     }
4850    
4851 dl 1.102 @SuppressWarnings("serial")
4852     static final class SearchKeysTask<K,V,U>
4853     extends BulkTask<K,V,U> {
4854 dl 1.52 final Fun<? super K, ? extends U> searchFunction;
4855     final AtomicReference<U> result;
4856     SearchKeysTask
4857 dl 1.102 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4858 dl 1.52 Fun<? super K, ? extends U> searchFunction,
4859     AtomicReference<U> result) {
4860 dl 1.102 super(p, b, i, f, t);
4861 dl 1.52 this.searchFunction = searchFunction; this.result = result;
4862     }
4863 dl 1.79 public final U getRawResult() { return result.get(); }
4864 dl 1.102 public final void compute() {
4865 dl 1.79 final Fun<? super K, ? extends U> searchFunction;
4866     final AtomicReference<U> result;
4867 dl 1.82 if ((searchFunction = this.searchFunction) != null &&
4868     (result = this.result) != null) {
4869 dl 1.102 for (int i = baseIndex, f, h; batch > 0 &&
4870     (h = ((f = baseLimit) + i) >>> 1) > i;) {
4871 dl 1.82 if (result.get() != null)
4872     return;
4873 dl 1.102 addToPendingCount(1);
4874 dl 1.82 new SearchKeysTask<K,V,U>
4875 dl 1.102 (this, batch >>>= 1, baseLimit = h, f, tab,
4876     searchFunction, result).fork();
4877 dl 1.61 }
4878 dl 1.82 while (result.get() == null) {
4879     U u;
4880 dl 1.102 Node<K,V> p;
4881     if ((p = advance()) == null) {
4882 dl 1.82 propagateCompletion();
4883     break;
4884     }
4885 dl 1.102 if ((u = searchFunction.apply(p.key)) != null) {
4886 dl 1.82 if (result.compareAndSet(null, u))
4887     quietlyCompleteRoot();
4888     break;
4889     }
4890 dl 1.52 }
4891     }
4892     }
4893     }
4894    
    /**
     * Task that searches values in its range with a search function,
     * publishing the first non-null function result (across all subtasks)
     * into a shared AtomicReference and terminating the whole computation.
     */
    @SuppressWarnings("serial")
    static final class SearchValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;   // shared across all subtasks
        SearchValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Fun<? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Halve the range and fork the upper half while split
                // budget remains; stop early if a result was already found.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchValuesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan retained range until exhausted or someone succeeds.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        // Range exhausted with no match: normal completion.
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.val)) != null) {
                        // CAS publishes the first match; the winner forces
                        // the whole task tree to complete.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
4938    
    /**
     * Task that searches entries in its range with a search function,
     * publishing the first non-null function result (across all subtasks)
     * into a shared AtomicReference and terminating the whole computation.
     */
    @SuppressWarnings("serial")
    static final class SearchEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result;   // shared across all subtasks
        SearchEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<Entry<K,V>, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Fun<Entry<K,V>, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Halve the range and fork the upper half while split
                // budget remains; stop early if a result was already found.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchEntriesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan retained range until exhausted or someone succeeds.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        // Range exhausted with no match: normal completion.
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p)) != null) {
                        // CAS publishes the first match; the winner forces
                        // the whole task tree to complete.  (return here is
                        // equivalent to the break used by sibling tasks:
                        // the loop condition would fail on re-entry.)
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        return;
                    }
                }
            }
        }
    }
4982    
    /**
     * Task that searches (key, value) pairs in its range with a search
     * function, publishing the first non-null function result (across all
     * subtasks) into a shared AtomicReference and terminating the whole
     * computation.
     */
    @SuppressWarnings("serial")
    static final class SearchMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFun<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;   // shared across all subtasks
        SearchMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFun<? super K, ? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final BiFun<? super K, ? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Halve the range and fork the upper half while split
                // budget remains; stop early if a result was already found.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchMappingsTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan retained range until exhausted or someone succeeds.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        // Range exhausted with no match: normal completion.
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key, p.val)) != null) {
                        // CAS publishes the first match; the winner forces
                        // the whole task tree to complete.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5026    
    /**
     * Task that reduces all keys in its range with a reducer, then merges
     * its result with those of its forked right-hand subtasks as the
     * completer tree unwinds.  A null result means "no elements seen".
     */
    @SuppressWarnings("serial")
    static final class ReduceKeysTask<K,V>
        extends BulkTask<K,V,K> {
        final BiFun<? super K, ? super K, ? extends K> reducer;
        K result;
        // rights: stack of subtasks forked by this task, linked via nextRight.
        ReduceKeysTask<K,V> rights, nextRight;
        ReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceKeysTask<K,V> nextRight,
             BiFun<? super K, ? super K, ? extends K> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final K getRawResult() { return result; }
        public final void compute() {
            final BiFun<? super K, ? super K, ? extends K> reducer;
            if ((reducer = this.reducer) != null) {
                // Halve the range while split budget remains, pushing each
                // forked upper-half subtask onto the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceKeysTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce the retained range; nulls are skipped on either side.
                K r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    K u = p.key;
                    r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
                }
                result = r;
                // As tasks complete bottom-up, fold each finished right-hand
                // subtask's result into its parent's, popping the rights list.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") ReduceKeysTask<K,V>
                        t = (ReduceKeysTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        K tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5073    
    /**
     * Task that reduces all values in its range with a reducer, then merges
     * its result with those of its forked right-hand subtasks as the
     * completer tree unwinds.  A null result means "no elements seen".
     */
    @SuppressWarnings("serial")
    static final class ReduceValuesTask<K,V>
        extends BulkTask<K,V,V> {
        final BiFun<? super V, ? super V, ? extends V> reducer;
        V result;
        // rights: stack of subtasks forked by this task, linked via nextRight.
        ReduceValuesTask<K,V> rights, nextRight;
        ReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceValuesTask<K,V> nextRight,
             BiFun<? super V, ? super V, ? extends V> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final V getRawResult() { return result; }
        public final void compute() {
            final BiFun<? super V, ? super V, ? extends V> reducer;
            if ((reducer = this.reducer) != null) {
                // Halve the range while split budget remains, pushing each
                // forked upper-half subtask onto the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceValuesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce the retained range.
                V r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    V v = p.val;
                    r = (r == null) ? v : reducer.apply(r, v);
                }
                result = r;
                // As tasks complete bottom-up, fold each finished right-hand
                // subtask's result into its parent's, popping the rights list.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") ReduceValuesTask<K,V>
                        t = (ReduceValuesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        V tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5120    
    /**
     * Task that reduces all entries in its range with a reducer, then
     * merges its result with those of its forked right-hand subtasks as
     * the completer tree unwinds.  A null result means "no elements seen".
     */
    @SuppressWarnings("serial")
    static final class ReduceEntriesTask<K,V>
        extends BulkTask<K,V,Map.Entry<K,V>> {
        final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
        Map.Entry<K,V> result;
        // rights: stack of subtasks forked by this task, linked via nextRight.
        ReduceEntriesTask<K,V> rights, nextRight;
        ReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceEntriesTask<K,V> nextRight,
             BiFun<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final Map.Entry<K,V> getRawResult() { return result; }
        public final void compute() {
            final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
            if ((reducer = this.reducer) != null) {
                // Halve the range while split budget remains, pushing each
                // forked upper-half subtask onto the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceEntriesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce the retained range (nodes themselves are entries).
                Map.Entry<K,V> r = null;
                for (Node<K,V> p; (p = advance()) != null; )
                    r = (r == null) ? p : reducer.apply(r, p);
                result = r;
                // As tasks complete bottom-up, fold each finished right-hand
                // subtask's result into its parent's, popping the rights list.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") ReduceEntriesTask<K,V>
                        t = (ReduceEntriesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        Map.Entry<K,V> tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5165    
    /**
     * Task that transforms each key in its range and reduces the non-null
     * transformed results with a reducer, merging with its forked
     * right-hand subtasks as the completer tree unwinds.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super K, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;
        // rights: stack of subtasks forked by this task, linked via nextRight.
        MapReduceKeysTask<K,V,U> rights, nextRight;
        MapReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysTask<K,V,U> nextRight,
             Fun<? super K, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Fun<? super K, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Halve the range while split budget remains, pushing each
                // forked upper-half subtask onto the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Transform and reduce the retained range; null transformed
                // values are skipped.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // As tasks complete bottom-up, fold each finished right-hand
                // subtask's result into its parent's, popping the rights list.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysTask<K,V,U>
                        t = (MapReduceKeysTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5218    
    /**
     * Task that transforms each value in its range and reduces the
     * non-null transformed results with a reducer, merging with its forked
     * right-hand subtasks as the completer tree unwinds.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super V, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;
        // rights: stack of subtasks forked by this task, linked via nextRight.
        MapReduceValuesTask<K,V,U> rights, nextRight;
        MapReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesTask<K,V,U> nextRight,
             Fun<? super V, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Fun<? super V, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Halve the range while split budget remains, pushing each
                // forked upper-half subtask onto the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Transform and reduce the retained range; null transformed
                // values are skipped.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // As tasks complete bottom-up, fold each finished right-hand
                // subtask's result into its parent's, popping the rights list.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesTask<K,V,U>
                        t = (MapReduceValuesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5271    
    /**
     * Task that transforms each entry in its range and reduces the
     * non-null transformed results with a reducer, merging with its forked
     * right-hand subtasks as the completer tree unwinds.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<Map.Entry<K,V>, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;
        // rights: stack of subtasks forked by this task, linked via nextRight.
        MapReduceEntriesTask<K,V,U> rights, nextRight;
        MapReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesTask<K,V,U> nextRight,
             Fun<Map.Entry<K,V>, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Fun<Map.Entry<K,V>, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Halve the range while split budget remains, pushing each
                // forked upper-half subtask onto the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Transform and reduce the retained range; null transformed
                // values are skipped.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // As tasks complete bottom-up, fold each finished right-hand
                // subtask's result into its parent's, popping the rights list.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesTask<K,V,U>
                        t = (MapReduceEntriesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5324    
    /**
     * Task that transforms each (key, value) pair in its range and reduces
     * the non-null transformed results with a reducer, merging with its
     * forked right-hand subtasks as the completer tree unwinds.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFun<? super K, ? super V, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;
        // rights: stack of subtasks forked by this task, linked via nextRight.
        MapReduceMappingsTask<K,V,U> rights, nextRight;
        MapReduceMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsTask<K,V,U> nextRight,
             BiFun<? super K, ? super V, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final BiFun<? super K, ? super V, ? extends U> transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Halve the range while split budget remains, pushing each
                // forked upper-half subtask onto the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Transform and reduce the retained range; null transformed
                // values are skipped.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // As tasks complete bottom-up, fold each finished right-hand
                // subtask's result into its parent's, popping the rights list.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsTask<K,V,U>
                        t = (MapReduceMappingsTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5377    
    /**
     * Task that maps each key in its range to a double and reduces with a
     * double reducer starting from {@code basis}; results of forked
     * right-hand subtasks are merged as the completer tree unwinds.
     * Primitive accumulation needs no null screening.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<? super K> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;       // identity/base value for the reduction
        double result;
        // rights: stack of subtasks forked by this task, linked via nextRight.
        MapReduceKeysToDoubleTask<K,V> rights, nextRight;
        MapReduceKeysToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToDoubleTask<K,V> nextRight,
             ObjectToDouble<? super K> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectToDouble<? super K> transformer;
            final DoubleByDoubleToDouble reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Halve the range while split budget remains, pushing each
                // forked upper-half subtask onto the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Map and reduce the retained range into r.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key));
                result = r;
                // As tasks complete bottom-up, fold each finished right-hand
                // subtask's result into its parent's, popping the rights list.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysToDoubleTask<K,V>
                        t = (MapReduceKeysToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5426    
    /**
     * Task that maps each value in its range to a double and reduces with
     * a double reducer starting from {@code basis}; results of forked
     * right-hand subtasks are merged as the completer tree unwinds.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<? super V> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;       // identity/base value for the reduction
        double result;
        // rights: stack of subtasks forked by this task, linked via nextRight.
        MapReduceValuesToDoubleTask<K,V> rights, nextRight;
        MapReduceValuesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToDoubleTask<K,V> nextRight,
             ObjectToDouble<? super V> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectToDouble<? super V> transformer;
            final DoubleByDoubleToDouble reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Halve the range while split budget remains, pushing each
                // forked upper-half subtask onto the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Map and reduce the retained range into r.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.val));
                result = r;
                // As tasks complete bottom-up, fold each finished right-hand
                // subtask's result into its parent's, popping the rights list.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesToDoubleTask<K,V>
                        t = (MapReduceValuesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5475    
    /**
     * Task that maps each entry in its range to a double and reduces with
     * a double reducer starting from {@code basis}; results of forked
     * right-hand subtasks are merged as the completer tree unwinds.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<Map.Entry<K,V>> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;       // identity/base value for the reduction
        double result;
        // rights: stack of subtasks forked by this task, linked via nextRight.
        MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
        MapReduceEntriesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToDoubleTask<K,V> nextRight,
             ObjectToDouble<Map.Entry<K,V>> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectToDouble<Map.Entry<K,V>> transformer;
            final DoubleByDoubleToDouble reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Halve the range while split budget remains, pushing each
                // forked upper-half subtask onto the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Map and reduce the retained range into r.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p));
                result = r;
                // As tasks complete bottom-up, fold each finished right-hand
                // subtask's result into its parent's, popping the rights list.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesToDoubleTask<K,V>
                        t = (MapReduceEntriesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5524    
    /**
     * Task that maps each (key, value) pair in its range to a double and
     * reduces with a double reducer starting from {@code basis}; results
     * of forked right-hand subtasks are merged as the completer tree
     * unwinds.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectByObjectToDouble<? super K, ? super V> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;       // identity/base value for the reduction
        double result;
        // rights: stack of subtasks forked by this task, linked via nextRight.
        MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
        MapReduceMappingsToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToDoubleTask<K,V> nextRight,
             ObjectByObjectToDouble<? super K, ? super V> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ObjectByObjectToDouble<? super K, ? super V> transformer;
            final DoubleByDoubleToDouble reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Halve the range while split budget remains, pushing each
                // forked upper-half subtask onto the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Map and reduce the retained range into r.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key, p.val));
                result = r;
                // As tasks complete bottom-up, fold each finished right-hand
                // subtask's result into its parent's, popping the rights list.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsToDoubleTask<K,V>
                        t = (MapReduceMappingsToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5573    
    /**
     * Task that maps each key in its range to a long and reduces with a
     * long reducer starting from {@code basis}; results of forked
     * right-hand subtasks are merged as the completer tree unwinds.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<? super K> transformer;
        final LongByLongToLong reducer;
        final long basis;         // identity/base value for the reduction
        long result;
        // rights: stack of subtasks forked by this task, linked via nextRight.
        MapReduceKeysToLongTask<K,V> rights, nextRight;
        MapReduceKeysToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToLongTask<K,V> nextRight,
             ObjectToLong<? super K> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectToLong<? super K> transformer;
            final LongByLongToLong reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Halve the range while split budget remains, pushing each
                // forked upper-half subtask onto the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Map and reduce the retained range into r.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key));
                result = r;
                // As tasks complete bottom-up, fold each finished right-hand
                // subtask's result into its parent's, popping the rights list.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysToLongTask<K,V>
                        t = (MapReduceKeysToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5622    
    /**
     * Task that maps each value in its range to a long and reduces with a
     * long reducer starting from {@code basis}; results of forked
     * right-hand subtasks are merged as the completer tree unwinds.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<? super V> transformer;
        final LongByLongToLong reducer;
        final long basis;         // identity/base value for the reduction
        long result;
        // rights: stack of subtasks forked by this task, linked via nextRight.
        MapReduceValuesToLongTask<K,V> rights, nextRight;
        MapReduceValuesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToLongTask<K,V> nextRight,
             ObjectToLong<? super V> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectToLong<? super V> transformer;
            final LongByLongToLong reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Halve the range while split budget remains, pushing each
                // forked upper-half subtask onto the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Map and reduce the retained range into r.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.val));
                result = r;
                // As tasks complete bottom-up, fold each finished right-hand
                // subtask's result into its parent's, popping the rights list.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesToLongTask<K,V>
                        t = (MapReduceValuesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5671    
    /**
     * ForkJoin task mapping each entry to a long via {@code transformer}
     * and combining results with {@code reducer}, starting from
     * {@code basis}.  Each split forks a "right" subtask over the upper
     * half of the remaining table range and keeps the lower half for
     * itself; partial results are merged bottom-up on completion.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<Map.Entry<K,V>> transformer; // maps an entry to a long
        final LongByLongToLong reducer;                 // combines two longs
        final long basis;                               // identity for the reduction
        long result;                                    // reduced value for this subtree
        MapReduceEntriesToLongTask<K,V> rights, nextRight; // list of forked right subtasks
        MapReduceEntriesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToLongTask<K,V> nextRight,
             ObjectToLong<Map.Entry<K,V>> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectToLong<Map.Entry<K,V>> transformer;
            final LongByLongToLong reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Split: fork right halves while batch budget remains;
                // note baseLimit/batch are updated as side effects here.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce the range this task kept
                // (Node itself implements Map.Entry here).
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p));
                result = r;
                // Merge results of completed right subtasks bottom-up.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesToLongTask<K,V>
                        t = (MapReduceEntriesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5720    
    /**
     * ForkJoin task mapping each (key, value) pair to a long via
     * {@code transformer} and combining results with {@code reducer},
     * starting from {@code basis}.  Each split forks a "right" subtask
     * over the upper half of the remaining table range and keeps the
     * lower half for itself; partial results are merged bottom-up.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectByObjectToLong<? super K, ? super V> transformer; // maps (k,v) to a long
        final LongByLongToLong reducer;            // combines two longs
        final long basis;                          // identity for the reduction
        long result;                               // reduced value for this subtree
        MapReduceMappingsToLongTask<K,V> rights, nextRight; // list of forked right subtasks
        MapReduceMappingsToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToLongTask<K,V> nextRight,
             ObjectByObjectToLong<? super K, ? super V> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ObjectByObjectToLong<? super K, ? super V> transformer;
            final LongByLongToLong reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Split: fork right halves while batch budget remains;
                // note baseLimit/batch are updated as side effects here.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce the range this task kept.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key, p.val));
                result = r;
                // Merge results of completed right subtasks bottom-up.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsToLongTask<K,V>
                        t = (MapReduceMappingsToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5769    
    /**
     * ForkJoin task mapping each key to an int via {@code transformer}
     * and combining results with {@code reducer}, starting from
     * {@code basis}.  Each split forks a "right" subtask over the upper
     * half of the remaining table range and keeps the lower half for
     * itself; partial results are merged bottom-up on completion.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<? super K> transformer;  // maps a key to an int
        final IntByIntToInt reducer;               // combines two ints
        final int basis;                           // identity for the reduction
        int result;                                // reduced value for this subtree
        MapReduceKeysToIntTask<K,V> rights, nextRight; // list of forked right subtasks
        MapReduceKeysToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToIntTask<K,V> nextRight,
             ObjectToInt<? super K> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectToInt<? super K> transformer;
            final IntByIntToInt reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Split: fork right halves while batch budget remains;
                // note baseLimit/batch are updated as side effects here.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce the range this task kept.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key));
                result = r;
                // Merge results of completed right subtasks bottom-up.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceKeysToIntTask<K,V>
                        t = (MapReduceKeysToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5818    
    /**
     * ForkJoin task mapping each value to an int via {@code transformer}
     * and combining results with {@code reducer}, starting from
     * {@code basis}.  Each split forks a "right" subtask over the upper
     * half of the remaining table range and keeps the lower half for
     * itself; partial results are merged bottom-up on completion.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<? super V> transformer;  // maps a value to an int
        final IntByIntToInt reducer;               // combines two ints
        final int basis;                           // identity for the reduction
        int result;                                // reduced value for this subtree
        MapReduceValuesToIntTask<K,V> rights, nextRight; // list of forked right subtasks
        MapReduceValuesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToIntTask<K,V> nextRight,
             ObjectToInt<? super V> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectToInt<? super V> transformer;
            final IntByIntToInt reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Split: fork right halves while batch budget remains;
                // note baseLimit/batch are updated as side effects here.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce the range this task kept.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.val));
                result = r;
                // Merge results of completed right subtasks bottom-up.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceValuesToIntTask<K,V>
                        t = (MapReduceValuesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5867    
    /**
     * ForkJoin task mapping each entry to an int via {@code transformer}
     * and combining results with {@code reducer}, starting from
     * {@code basis}.  Each split forks a "right" subtask over the upper
     * half of the remaining table range and keeps the lower half for
     * itself; partial results are merged bottom-up on completion.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<Map.Entry<K,V>> transformer; // maps an entry to an int
        final IntByIntToInt reducer;                   // combines two ints
        final int basis;                               // identity for the reduction
        int result;                                    // reduced value for this subtree
        MapReduceEntriesToIntTask<K,V> rights, nextRight; // list of forked right subtasks
        MapReduceEntriesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToIntTask<K,V> nextRight,
             ObjectToInt<Map.Entry<K,V>> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectToInt<Map.Entry<K,V>> transformer;
            final IntByIntToInt reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Split: fork right halves while batch budget remains;
                // note baseLimit/batch are updated as side effects here.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce the range this task kept
                // (Node itself implements Map.Entry here).
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p));
                result = r;
                // Merge results of completed right subtasks bottom-up.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceEntriesToIntTask<K,V>
                        t = (MapReduceEntriesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5916    
    /**
     * ForkJoin task mapping each (key, value) pair to an int via
     * {@code transformer} and combining results with {@code reducer},
     * starting from {@code basis}.  Each split forks a "right" subtask
     * over the upper half of the remaining table range and keeps the
     * lower half for itself; partial results are merged bottom-up.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectByObjectToInt<? super K, ? super V> transformer; // maps (k,v) to an int
        final IntByIntToInt reducer;               // combines two ints
        final int basis;                           // identity for the reduction
        int result;                                // reduced value for this subtree
        MapReduceMappingsToIntTask<K,V> rights, nextRight; // list of forked right subtasks
        MapReduceMappingsToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToIntTask<K,V> nextRight,
             ObjectByObjectToInt<? super K, ? super V> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ObjectByObjectToInt<? super K, ? super V> transformer;
            final IntByIntToInt reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Split: fork right halves while batch budget remains;
                // note baseLimit/batch are updated as side effects here.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce the range this task kept.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.apply(r, transformer.apply(p.key, p.val));
                result = r;
                // Merge results of completed right subtasks bottom-up.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked") MapReduceMappingsToIntTask<K,V>
                        t = (MapReduceMappingsToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.apply(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5965    
5966 dl 1.102 /* ---------------- Counters -------------- */
5967    
5968     // Adapted from LongAdder and Striped64.
5969     // See their internal docs for explanation.
5970    
    // A padded cell for distributing counts
    static final class CounterCell {
        // p0..p6 / q0..q6 pad around the hot "value" field so that cells
        // updated by different threads land on different cache lines
        // (adapted from Striped64 — see the note above).
        volatile long p0, p1, p2, p3, p4, p5, p6;
        volatile long value;   // the cell's running count; updated via CAS (CELLVALUE offset)
        volatile long q0, q1, q2, q3, q4, q5, q6;
        CounterCell(long x) { value = x; }
    }
5978    
    /**
     * Holder for the thread-local hash code determining which
     * CounterCell to use. The code is initialized via the
     * counterHashCodeGenerator, but may be moved upon collisions.
     */
    static final class CounterHashCode {
        int code;   // current probe value; rehashed in fullAddCount on contention
    }
5987    
    /**
     * Generates initial value for per-thread CounterHashCodes.
     */
    static final AtomicInteger counterHashCodeGenerator = new AtomicInteger();

    /**
     * Increment for counterHashCodeGenerator. See class ThreadLocal
     * for explanation. (Weyl-sequence increment producing well-spread
     * probe values.)
     */
    static final int SEED_INCREMENT = 0x61c88647;

    /**
     * Per-thread counter hash codes. Shared across all instances.
     */
    static final ThreadLocal<CounterHashCode> threadCounterHashCode =
        new ThreadLocal<CounterHashCode>();
6004    
6005    
6006     final long sumCount() {
6007     CounterCell[] as = counterCells; CounterCell a;
6008     long sum = baseCount;
6009     if (as != null) {
6010     for (int i = 0; i < as.length; ++i) {
6011     if ((a = as[i]) != null)
6012     sum += a.value;
6013     }
6014     }
6015     return sum;
6016     }
6017    
    // See LongAdder version for explanation
    /**
     * Slow path for counter updates: handles probe initialization,
     * counter-cell table creation/expansion, contention (rehash and
     * retry), and final fallback to CASing baseCount.  Mirrors
     * Striped64.longAccumulate.
     *
     * Params: x = value to add; hc = caller's per-thread hash holder,
     * or null if not yet created; wasUncontended = false when a CAS on
     * the caller's cell has already failed (forces one rehash first).
     */
    private final void fullAddCount(long x, CounterHashCode hc,
                                    boolean wasUncontended) {
        int h;
        if (hc == null) {
            // First use by this thread: install a per-thread probe value.
            hc = new CounterHashCode();
            int s = counterHashCodeGenerator.addAndGet(SEED_INCREMENT);
            h = hc.code = (s == 0) ? 1 : s; // Avoid zero
            threadCounterHashCode.set(hc);
        }
        else
            h = hc.code;
        boolean collide = false;                // True if last slot nonempty
        for (;;) {
            CounterCell[] as; CounterCell a; int n; long v;
            if ((as = counterCells) != null && (n = as.length) > 0) {
                if ((a = as[(n - 1) & h]) == null) {
                    if (cellsBusy == 0) {            // Try to attach new Cell
                        CounterCell r = new CounterCell(x); // Optimistic create
                        if (cellsBusy == 0 &&
                            U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                            boolean created = false;
                            try {               // Recheck under lock
                                CounterCell[] rs; int m, j;
                                if ((rs = counterCells) != null &&
                                    (m = rs.length) > 0 &&
                                    rs[j = (m - 1) & h] == null) {
                                    rs[j] = r;
                                    created = true;
                                }
                            } finally {
                                cellsBusy = 0;
                            }
                            if (created)
                                break;
                            continue;           // Slot is now non-empty
                        }
                    }
                    collide = false;
                }
                else if (!wasUncontended)       // CAS already known to fail
                    wasUncontended = true;      // Continue after rehash
                else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
                    break;                      // Added to existing cell
                else if (counterCells != as || n >= NCPU)
                    collide = false;            // At max size or stale
                else if (!collide)
                    collide = true;             // Allow one more miss before expanding
                else if (cellsBusy == 0 &&
                         U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                    try {
                        if (counterCells == as) {// Expand table unless stale
                            CounterCell[] rs = new CounterCell[n << 1];
                            for (int i = 0; i < n; ++i)
                                rs[i] = as[i];
                            counterCells = rs;
                        }
                    } finally {
                        cellsBusy = 0;
                    }
                    collide = false;
                    continue;                   // Retry with expanded table
                }
                // XorShift step: move to a different cell on contention.
                h ^= h << 13;                   // Rehash
                h ^= h >>> 17;
                h ^= h << 5;
            }
            else if (cellsBusy == 0 && counterCells == as &&
                     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                boolean init = false;
                try {                           // Initialize table
                    if (counterCells == as) {
                        CounterCell[] rs = new CounterCell[2];
                        rs[h & 1] = new CounterCell(x);
                        counterCells = rs;
                        init = true;
                    }
                } finally {
                    cellsBusy = 0;
                }
                if (init)
                    break;
            }
            else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
                break;                          // Fall back on using base
        }
        hc.code = h; // Record index for next time
    }
6106    
    // Unsafe mechanics: raw field/array offsets resolved in the static
    // initializer below and used for CAS operations throughout.
    private static final sun.misc.Unsafe U;
    private static final long SIZECTL;        // offset of sizeCtl
    private static final long TRANSFERINDEX;  // offset of transferIndex
    private static final long TRANSFERORIGIN; // offset of transferOrigin
    private static final long BASECOUNT;      // offset of baseCount
    private static final long CELLSBUSY;      // offset of cellsBusy
    private static final long CELLVALUE;      // offset of CounterCell.value
    private static final long ABASE;          // base offset of Node[] element storage
    private static final int ASHIFT;          // log2 of Node[] element scale, for indexing
6117    
    // Resolve the Unsafe instance, field offsets, and array scale once
    // at class load.  Any reflection failure here is unrecoverable.
    static {
        try {
            U = getUnsafe();
            Class<?> k = ConcurrentHashMapV8.class;
            SIZECTL = U.objectFieldOffset
                (k.getDeclaredField("sizeCtl"));
            TRANSFERINDEX = U.objectFieldOffset
                (k.getDeclaredField("transferIndex"));
            TRANSFERORIGIN = U.objectFieldOffset
                (k.getDeclaredField("transferOrigin"));
            BASECOUNT = U.objectFieldOffset
                (k.getDeclaredField("baseCount"));
            CELLSBUSY = U.objectFieldOffset
                (k.getDeclaredField("cellsBusy"));
            Class<?> ck = CounterCell.class;
            CELLVALUE = U.objectFieldOffset
                (ck.getDeclaredField("value"));
            Class<?> ak = Node[].class;
            ABASE = U.arrayBaseOffset(ak);
            int scale = U.arrayIndexScale(ak);
            // Element addressing uses (i << ASHIFT) + ABASE, so the
            // scale must be a power of two.
            if ((scale & (scale - 1)) != 0)
                throw new Error("data type scale not a power of two");
            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
        } catch (Exception e) {
            throw new Error(e);
        }
    }
6145    
6146     /**
6147     * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
6148     * Replace with a simple call to Unsafe.getUnsafe when integrating
6149     * into a jdk.
6150     *
6151     * @return a sun.misc.Unsafe
6152     */
6153     private static sun.misc.Unsafe getUnsafe() {
6154     try {
6155     return sun.misc.Unsafe.getUnsafe();
6156 jsr166 1.87 } catch (SecurityException tryReflectionInstead) {}
6157     try {
6158     return java.security.AccessController.doPrivileged
6159     (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
6160     public sun.misc.Unsafe run() throws Exception {
6161     Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
6162     for (java.lang.reflect.Field f : k.getDeclaredFields()) {
6163     f.setAccessible(true);
6164     Object x = f.get(null);
6165     if (k.isInstance(x))
6166     return k.cast(x);
6167     }
6168     throw new NoSuchFieldError("the Unsafe");
6169     }});
6170     } catch (java.security.PrivilegedActionException e) {
6171     throw new RuntimeException("Could not initialize intrinsics",
6172     e.getCause());
6173 dl 1.52 }
6174     }
6175 dl 1.1 }