ViewVC Help
View File | Revision Log | Show Annotations | Download File | Root Listing
root/jsr166/jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java
Revision: 1.295
Committed: Sun Jul 17 04:23:31 2016 UTC (7 years, 10 months ago) by jsr166
Branch: MAIN
Changes since 1.294: +24 -5 lines
Log Message:
optimize view set removeAll using heuristics

File Contents

# User Rev Content
1 dl 1.2 /*
2     * Written by Doug Lea with assistance from members of JCP JSR-166
3 dl 1.36 * Expert Group and released to the public domain, as explained at
4 dl 1.100 * http://creativecommons.org/publicdomain/zero/1.0/
5 dl 1.2 */
6    
7 tim 1.1 package java.util.concurrent;
8 dl 1.222
9     import java.io.ObjectStreamField;
10 dl 1.208 import java.io.Serializable;
11     import java.lang.reflect.ParameterizedType;
12     import java.lang.reflect.Type;
13 dl 1.236 import java.util.AbstractMap;
14 dl 1.119 import java.util.Arrays;
15     import java.util.Collection;
16 dl 1.208 import java.util.Enumeration;
17     import java.util.HashMap;
18 dl 1.119 import java.util.Hashtable;
19     import java.util.Iterator;
20 dl 1.208 import java.util.Map;
21 dl 1.119 import java.util.NoSuchElementException;
22 dl 1.208 import java.util.Set;
23     import java.util.Spliterator;
24     import java.util.concurrent.atomic.AtomicReference;
25 dl 1.222 import java.util.concurrent.locks.LockSupport;
26 dl 1.208 import java.util.concurrent.locks.ReentrantLock;
27     import java.util.function.BiConsumer;
28     import java.util.function.BiFunction;
29     import java.util.function.Consumer;
30     import java.util.function.DoubleBinaryOperator;
31     import java.util.function.Function;
32     import java.util.function.IntBinaryOperator;
33     import java.util.function.LongBinaryOperator;
34 dl 1.271 import java.util.function.Predicate;
35 dl 1.208 import java.util.function.ToDoubleBiFunction;
36     import java.util.function.ToDoubleFunction;
37     import java.util.function.ToIntBiFunction;
38     import java.util.function.ToIntFunction;
39     import java.util.function.ToLongBiFunction;
40     import java.util.function.ToLongFunction;
41 dl 1.210 import java.util.stream.Stream;
42 jsr166 1.293 import jdk.internal.misc.Unsafe;
43 tim 1.1
44     /**
45 dl 1.4 * A hash table supporting full concurrency of retrievals and
46 dl 1.119 * high expected concurrency for updates. This class obeys the
47 dl 1.22 * same functional specification as {@link java.util.Hashtable}, and
48 dl 1.19 * includes versions of methods corresponding to each method of
49 dl 1.119 * {@code Hashtable}. However, even though all operations are
50 dl 1.19 * thread-safe, retrieval operations do <em>not</em> entail locking,
51     * and there is <em>not</em> any support for locking the entire table
52     * in a way that prevents all access. This class is fully
53 dl 1.119 * interoperable with {@code Hashtable} in programs that rely on its
54 dl 1.4 * thread safety but not on its synchronization details.
55 tim 1.11 *
56 jsr166 1.145 * <p>Retrieval operations (including {@code get}) generally do not
57 dl 1.119 * block, so may overlap with update operations (including {@code put}
58     * and {@code remove}). Retrievals reflect the results of the most
59     * recently <em>completed</em> update operations holding upon their
60 dl 1.126 * onset. (More formally, an update operation for a given key bears a
61     * <em>happens-before</em> relation with any (non-null) retrieval for
62     * that key reporting the updated value.) For aggregate operations
63     * such as {@code putAll} and {@code clear}, concurrent retrievals may
64     * reflect insertion or removal of only some entries. Similarly,
65 jsr166 1.241 * Iterators, Spliterators and Enumerations return elements reflecting the
66     * state of the hash table at some point at or since the creation of the
67 dl 1.126 * iterator/enumeration. They do <em>not</em> throw {@link
68 jsr166 1.241 * java.util.ConcurrentModificationException ConcurrentModificationException}.
69     * However, iterators are designed to be used by only one thread at a time.
70     * Bear in mind that the results of aggregate status methods including
71     * {@code size}, {@code isEmpty}, and {@code containsValue} are typically
72     * useful only when a map is not undergoing concurrent updates in other threads.
73 dl 1.126 * Otherwise the results of these methods reflect transient states
74     * that may be adequate for monitoring or estimation purposes, but not
75     * for program control.
76 tim 1.1 *
77 jsr166 1.145 * <p>The table is dynamically expanded when there are too many
78 dl 1.119 * collisions (i.e., keys that have distinct hash codes but fall into
79     * the same slot modulo the table size), with the expected average
80     * effect of maintaining roughly two bins per mapping (corresponding
81     * to a 0.75 load factor threshold for resizing). There may be much
82     * variance around this average as mappings are added and removed, but
83     * overall, this maintains a commonly accepted time/space tradeoff for
84     * hash tables. However, resizing this or any other kind of hash
85     * table may be a relatively slow operation. When possible, it is a
86     * good idea to provide a size estimate as an optional {@code
87     * initialCapacity} constructor argument. An additional optional
88     * {@code loadFactor} constructor argument provides a further means of
89     * customizing initial table capacity by specifying the table density
90     * to be used in calculating the amount of space to allocate for the
91     * given number of elements. Also, for compatibility with previous
92     * versions of this class, constructors may optionally specify an
93     * expected {@code concurrencyLevel} as an additional hint for
94     * internal sizing. Note that using many keys with exactly the same
95     * {@code hashCode()} is a sure way to slow down performance of any
96 dl 1.210 * hash table. To ameliorate impact, when keys are {@link Comparable},
97     * this class may use comparison order among keys to help break ties.
98 tim 1.1 *
99 jsr166 1.145 * <p>A {@link Set} projection of a ConcurrentHashMap may be created
100 dl 1.137 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
100     * (using {@link #keySet(Object)}) when only keys are of interest, and the
102     * mapped values are (perhaps transiently) not used or all take the
103     * same mapping value.
104     *
105 jsr166 1.257 * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
106 dl 1.153 * form of histogram or multiset) by using {@link
107     * java.util.concurrent.atomic.LongAdder} values and initializing via
108 jsr166 1.175 * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
109     * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
110 jsr166 1.257 * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
111 dl 1.137 *
112 dl 1.45 * <p>This class and its views and iterators implement all of the
113     * <em>optional</em> methods of the {@link Map} and {@link Iterator}
114     * interfaces.
115 dl 1.23 *
116 jsr166 1.145 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
117 dl 1.119 * does <em>not</em> allow {@code null} to be used as a key or value.
118 tim 1.1 *
119 dl 1.210 * <p>ConcurrentHashMaps support a set of sequential and parallel bulk
120     * operations that, unlike most {@link Stream} methods, are designed
121     * to be safely, and often sensibly, applied even with maps that are
122     * being concurrently updated by other threads; for example, when
123     * computing a snapshot summary of the values in a shared registry.
124     * There are three kinds of operation, each with four forms, accepting
125 jsr166 1.278 * functions with keys, values, entries, and (key, value) pairs as
126     * arguments and/or return values. Because the elements of a
127     * ConcurrentHashMap are not ordered in any particular way, and may be
128     * processed in different orders in different parallel executions, the
129     * correctness of supplied functions should not depend on any
130     * ordering, or on any other objects or values that may transiently
131     * change while computation is in progress; and except for forEach
132     * actions, should ideally be side-effect-free. Bulk operations on
133     * {@link java.util.Map.Entry} objects do not support method {@code
134     * setValue}.
135 dl 1.137 *
136     * <ul>
137 jsr166 1.280 * <li>forEach: Performs a given action on each element.
138 dl 1.137 * A variant form applies a given transformation on each element
139 jsr166 1.280 * before performing the action.
140 dl 1.137 *
141 jsr166 1.280 * <li>search: Returns the first available non-null result of
142 dl 1.137 * applying a given function on each element; skipping further
143 jsr166 1.280 * search when a result is found.
144 dl 1.137 *
145 jsr166 1.280 * <li>reduce: Accumulates each element. The supplied reduction
146 dl 1.137 * function cannot rely on ordering (more formally, it should be
147     * both associative and commutative). There are five variants:
148     *
149     * <ul>
150     *
151 jsr166 1.280 * <li>Plain reductions. (There is not a form of this method for
152 dl 1.137 * (key, value) function arguments since there is no corresponding
153 jsr166 1.280 * return type.)
154 dl 1.137 *
155 jsr166 1.280 * <li>Mapped reductions that accumulate the results of a given
156     * function applied to each element.
157 dl 1.137 *
158 jsr166 1.280 * <li>Reductions to scalar doubles, longs, and ints, using a
159     * given basis value.
160 dl 1.137 *
161 jsr166 1.178 * </ul>
162 dl 1.137 * </ul>
163     *
164 dl 1.210 * <p>These bulk operations accept a {@code parallelismThreshold}
165     * argument. Methods proceed sequentially if the current map size is
166     * estimated to be less than the given threshold. Using a value of
167     * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value
168 dl 1.217 * of {@code 1} results in maximal parallelism by partitioning into
169 dl 1.219 * enough subtasks to fully utilize the {@link
170     * ForkJoinPool#commonPool()} that is used for all parallel
171     * computations. Normally, you would initially choose one of these
172     * extreme values, and then measure performance of using in-between
173     * values that trade off overhead versus throughput.
174 dl 1.210 *
175 dl 1.137 * <p>The concurrency properties of bulk operations follow
176     * from those of ConcurrentHashMap: Any non-null result returned
177     * from {@code get(key)} and related access methods bears a
178     * happens-before relation with the associated insertion or
179     * update. The result of any bulk operation reflects the
180     * composition of these per-element relations (but is not
181     * necessarily atomic with respect to the map as a whole unless it
182     * is somehow known to be quiescent). Conversely, because keys
183     * and values in the map are never null, null serves as a reliable
184     * atomic indicator of the current lack of any result. To
185     * maintain this property, null serves as an implicit basis for
186     * all non-scalar reduction operations. For the double, long, and
187     * int versions, the basis should be one that, when combined with
188     * any other value, returns that other value (more formally, it
189     * should be the identity element for the reduction). Most common
190     * reductions have these properties; for example, computing a sum
191     * with basis 0 or a minimum with basis MAX_VALUE.
192     *
193     * <p>Search and transformation functions provided as arguments
194     * should similarly return null to indicate the lack of any result
195     * (in which case it is not used). In the case of mapped
196     * reductions, this also enables transformations to serve as
197     * filters, returning null (or, in the case of primitive
198     * specializations, the identity basis) if the element should not
199     * be combined. You can create compound transformations and
200     * filterings by composing them yourself under this "null means
201     * there is nothing there now" rule before using them in search or
202     * reduce operations.
203     *
204     * <p>Methods accepting and/or returning Entry arguments maintain
205     * key-value associations. They may be useful for example when
206     * finding the key for the greatest value. Note that "plain" Entry
207     * arguments can be supplied using {@code new
208     * AbstractMap.SimpleEntry(k,v)}.
209     *
210 jsr166 1.145 * <p>Bulk operations may complete abruptly, throwing an
211 dl 1.137 * exception encountered in the application of a supplied
212     * function. Bear in mind when handling such exceptions that other
213     * concurrently executing functions could also have thrown
214     * exceptions, or would have done so if the first exception had
215     * not occurred.
216     *
217 dl 1.151 * <p>Speedups for parallel compared to sequential forms are common
218     * but not guaranteed. Parallel operations involving brief functions
219     * on small maps may execute more slowly than sequential forms if the
220     * underlying work to parallelize the computation is more expensive
221     * than the computation itself. Similarly, parallelization may not
222     * lead to much actual parallelism if all processors are busy
223     * performing unrelated tasks.
224 dl 1.137 *
225 jsr166 1.145 * <p>All arguments to all task methods must be non-null.
226 dl 1.137 *
227 dl 1.42 * <p>This class is a member of the
228 jsr166 1.88 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
229 dl 1.42 * Java Collections Framework</a>.
230     *
231 dl 1.8 * @since 1.5
232     * @author Doug Lea
233 dl 1.27 * @param <K> the type of keys maintained by this map
234 jsr166 1.64 * @param <V> the type of mapped values
235 dl 1.8 */
236 dl 1.240 public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
237     implements ConcurrentMap<K,V>, Serializable {
238 dl 1.20 private static final long serialVersionUID = 7249069246763182397L;
239 tim 1.1
240     /*
241 dl 1.119 * Overview:
242     *
243     * The primary design goal of this hash table is to maintain
244     * concurrent readability (typically method get(), but also
245     * iterators and related methods) while minimizing update
246     * contention. Secondary goals are to keep space consumption about
247     * the same or better than java.util.HashMap, and to support high
248     * initial insertion rates on an empty table by many threads.
249     *
250 dl 1.224 * This map usually acts as a binned (bucketed) hash table. Each
251     * key-value mapping is held in a Node. Most nodes are instances
252     * of the basic Node class with hash, key, value, and next
253     * fields. However, various subclasses exist: TreeNodes are
254 dl 1.222 * arranged in balanced trees, not lists. TreeBins hold the roots
255     * of sets of TreeNodes. ForwardingNodes are placed at the heads
256     * of bins during resizing. ReservationNodes are used as
257     * placeholders while establishing values in computeIfAbsent and
258 dl 1.224 * related methods. The types TreeBin, ForwardingNode, and
259 dl 1.222 * ReservationNode do not hold normal user keys, values, or
260     * hashes, and are readily distinguishable during search etc
261     * because they have negative hash fields and null key and value
262     * fields. (These special nodes are either uncommon or transient,
263     * so the impact of carrying around some unused fields is
264 jsr166 1.232 * insignificant.)
265 dl 1.119 *
266     * The table is lazily initialized to a power-of-two size upon the
267     * first insertion. Each bin in the table normally contains a
268     * list of Nodes (most often, the list has only zero or one Node).
269     * Table accesses require volatile/atomic reads, writes, and
270     * CASes. Because there is no other way to arrange this without
271     * adding further indirections, we use intrinsics
272 jsr166 1.291 * (jdk.internal.misc.Unsafe) operations.
273 dl 1.119 *
274 dl 1.149 * We use the top (sign) bit of Node hash fields for control
275     * purposes -- it is available anyway because of addressing
276 dl 1.222 * constraints. Nodes with negative hash fields are specially
277     * handled or ignored in map methods.
278 dl 1.119 *
279     * Insertion (via put or its variants) of the first node in an
280     * empty bin is performed by just CASing it to the bin. This is
281     * by far the most common case for put operations under most
282     * key/hash distributions. Other update operations (insert,
283     * delete, and replace) require locks. We do not want to waste
284     * the space required to associate a distinct lock object with
285     * each bin, so instead use the first node of a bin list itself as
286 dl 1.149 * a lock. Locking support for these locks relies on builtin
287     * "synchronized" monitors.
288 dl 1.119 *
289     * Using the first node of a list as a lock does not by itself
290     * suffice though: When a node is locked, any update must first
291     * validate that it is still the first node after locking it, and
292     * retry if not. Because new nodes are always appended to lists,
293     * once a node is first in a bin, it remains first until deleted
294 dl 1.210 * or the bin becomes invalidated (upon resizing).
295 dl 1.119 *
296     * The main disadvantage of per-bin locks is that other update
297     * operations on other nodes in a bin list protected by the same
298     * lock can stall, for example when user equals() or mapping
299     * functions take a long time. However, statistically, under
300     * random hash codes, this is not a common problem. Ideally, the
301     * frequency of nodes in bins follows a Poisson distribution
302     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
303     * parameter of about 0.5 on average, given the resizing threshold
304     * of 0.75, although with a large variance because of resizing
305     * granularity. Ignoring variance, the expected occurrences of
306     * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
307     * first values are:
308     *
309     * 0: 0.60653066
310     * 1: 0.30326533
311     * 2: 0.07581633
312     * 3: 0.01263606
313     * 4: 0.00157952
314     * 5: 0.00015795
315     * 6: 0.00001316
316     * 7: 0.00000094
317     * 8: 0.00000006
318     * more: less than 1 in ten million
319     *
320     * Lock contention probability for two threads accessing distinct
321     * elements is roughly 1 / (8 * #elements) under random hashes.
322     *
323     * Actual hash code distributions encountered in practice
324     * sometimes deviate significantly from uniform randomness. This
325     * includes the case when N > (1<<30), so some keys MUST collide.
326     * Similarly for dumb or hostile usages in which multiple keys are
327 dl 1.222 * designed to have identical hash codes or ones that differ only
328 dl 1.224 * in masked-out high bits. So we use a secondary strategy that
329     * applies when the number of nodes in a bin exceeds a
330     * threshold. These TreeBins use a balanced tree to hold nodes (a
331     * specialized form of red-black trees), bounding search time to
332     * O(log N). Each search step in a TreeBin is at least twice as
333     * slow as in a regular list, but given that N cannot exceed
334     * (1<<64) (before running out of addresses) this bounds search
335     * steps, lock hold times, etc, to reasonable constants (roughly
336     * 100 nodes inspected per operation worst case) so long as keys
337     * are Comparable (which is very common -- String, Long, etc).
338 dl 1.119 * TreeBin nodes (TreeNodes) also maintain the same "next"
339     * traversal pointers as regular nodes, so can be traversed in
340     * iterators in the same way.
341     *
342     * The table is resized when occupancy exceeds a percentage
343 dl 1.149 * threshold (nominally, 0.75, but see below). Any thread
344     * noticing an overfull bin may assist in resizing after the
345 jsr166 1.248 * initiating thread allocates and sets up the replacement array.
346     * However, rather than stalling, these other threads may proceed
347     * with insertions etc. The use of TreeBins shields us from the
348     * worst case effects of overfilling while resizes are in
349 dl 1.149 * progress. Resizing proceeds by transferring bins, one by one,
350 dl 1.246 * from the table to the next table. However, threads claim small
351     * blocks of indices to transfer (via field transferIndex) before
352 dl 1.252 * doing so, reducing contention. A generation stamp in field
353     * sizeCtl ensures that resizings do not overlap. Because we are
354     * using power-of-two expansion, the elements from each bin must
355     * either stay at same index, or move with a power of two
356     * offset. We eliminate unnecessary node creation by catching
357     * cases where old nodes can be reused because their next fields
358     * won't change. On average, only about one-sixth of them need
359     * cloning when a table doubles. The nodes they replace will be
360     * garbage collectable as soon as they are no longer referenced by
361     * any reader thread that may be in the midst of concurrently
362     * traversing table. Upon transfer, the old table bin contains
363     * only a special forwarding node (with hash field "MOVED") that
364     * contains the next table as its key. On encountering a
365     * forwarding node, access and update operations restart, using
366     * the new table.
367 dl 1.149 *
368     * Each bin transfer requires its bin lock, which can stall
369     * waiting for locks while resizing. However, because other
370     * threads can join in and help resize rather than contend for
371     * locks, average aggregate waits become shorter as resizing
372     * progresses. The transfer operation must also ensure that all
373     * accessible bins in both the old and new table are usable by any
374 dl 1.246 * traversal. This is arranged in part by proceeding from the
375     * last bin (table.length - 1) up towards the first. Upon seeing
376     * a forwarding node, traversals (see class Traverser) arrange to
377     * move to the new table without revisiting nodes. To ensure that
378     * no intervening nodes are skipped even when moved out of order,
379     * a stack (see class TableStack) is created on first encounter of
380     * a forwarding node during a traversal, to maintain its place if
381     * later processing the current table. The need for these
382     * save/restore mechanics is relatively rare, but when one
383 jsr166 1.248 * forwarding node is encountered, typically many more will be.
384 dl 1.246 * So Traversers use a simple caching scheme to avoid creating so
385     * many new TableStack nodes. (Thanks to Peter Levart for
386     * suggesting use of a stack here.)
387 dl 1.119 *
388     * The traversal scheme also applies to partial traversals of
389     * ranges of bins (via an alternate Traverser constructor)
390     * to support partitioned aggregate operations. Also, read-only
391     * operations give up if ever forwarded to a null table, which
392     * provides support for shutdown-style clearing, which is also not
393     * currently implemented.
394     *
395     * Lazy table initialization minimizes footprint until first use,
396     * and also avoids resizings when the first operation is from a
397     * putAll, constructor with map argument, or deserialization.
398     * These cases attempt to override the initial capacity settings,
399     * but harmlessly fail to take effect in cases of races.
400     *
401 dl 1.149 * The element count is maintained using a specialization of
402     * LongAdder. We need to incorporate a specialization rather than
403     * just use a LongAdder in order to access implicit
404     * contention-sensing that leads to creation of multiple
405 dl 1.222 * CounterCells. The counter mechanics avoid contention on
406 dl 1.149 * updates but can encounter cache thrashing if read too
407     * frequently during concurrent access. To avoid reading so often,
408     * resizing under contention is attempted only upon adding to a
409     * bin already holding two or more nodes. Under uniform hash
410     * distributions, the probability of this occurring at threshold
411     * is around 13%, meaning that only about 1 in 8 puts check
412 dl 1.222 * threshold (and after resizing, many fewer do so).
413     *
414     * TreeBins use a special form of comparison for search and
415     * related operations (which is the main reason we cannot use
416     * existing collections such as TreeMaps). TreeBins contain
417     * Comparable elements, but may contain others, as well as
418 dl 1.240 * elements that are Comparable but not necessarily Comparable for
419     * the same T, so we cannot invoke compareTo among them. To handle
420     * this, the tree is ordered primarily by hash value, then by
421     * Comparable.compareTo order if applicable. On lookup at a node,
422     * if elements are not comparable or compare as 0 then both left
423     * and right children may need to be searched in the case of tied
424     * hash values. (This corresponds to the full list search that
425     * would be necessary if all elements were non-Comparable and had
426     * tied hashes.) On insertion, to keep a total ordering (or as
427     * close as is required here) across rebalancings, we compare
428     * classes and identityHashCodes as tie-breakers. The red-black
429     * balancing code is updated from pre-jdk-collections
430 dl 1.222 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
431     * based in turn on Cormen, Leiserson, and Rivest "Introduction to
432     * Algorithms" (CLR).
433     *
434     * TreeBins also require an additional locking mechanism. While
435 dl 1.224 * list traversal is always possible by readers even during
436 jsr166 1.232 * updates, tree traversal is not, mainly because of tree-rotations
437 dl 1.222 * that may change the root node and/or its linkages. TreeBins
438     * include a simple read-write lock mechanism parasitic on the
439     * main bin-synchronization strategy: Structural adjustments
440     * associated with an insertion or removal are already bin-locked
441     * (and so cannot conflict with other writers) but must wait for
442     * ongoing readers to finish. Since there can be only one such
443     * waiter, we use a simple scheme using a single "waiter" field to
444     * block writers. However, readers need never block. If the root
445     * lock is held, they proceed along the slow traversal path (via
446     * next-pointers) until the lock becomes available or the list is
447     * exhausted, whichever comes first. These cases are not fast, but
448     * maximize aggregate expected throughput.
449 dl 1.119 *
450     * Maintaining API and serialization compatibility with previous
451     * versions of this class introduces several oddities. Mainly: We
452 jsr166 1.274 * leave untouched but unused constructor arguments referring to
453 dl 1.119 * concurrencyLevel. We accept a loadFactor constructor argument,
454     * but apply it only to initial table capacity (which is the only
455     * time that we can guarantee to honor it.) We also declare an
456     * unused "Segment" class that is instantiated in minimal form
457     * only when serializing.
458 dl 1.222 *
459 dl 1.240 * Also, solely for compatibility with previous versions of this
460     * class, it extends AbstractMap, even though all of its methods
461     * are overridden, so it is just useless baggage.
462     *
463 dl 1.222 * This file is organized to make things a little easier to follow
464 dl 1.224 * while reading than they might otherwise: First the main static
465 dl 1.222 * declarations and utilities, then fields, then main public
466     * methods (with a few factorings of multiple public methods into
467     * internal ones), then sizing methods, trees, traversers, and
468     * bulk operations.
469 dl 1.4 */
470 tim 1.1
471 dl 1.4 /* ---------------- Constants -------------- */
472 tim 1.11
473 dl 1.4 /**
474 dl 1.119 * The largest possible table capacity. This value must be
475     * exactly 1<<30 to stay within Java array allocation and indexing
476     * bounds for power of two table sizes, and is further required
477     * because the top two bits of 32bit hash fields are used for
478     * control purposes.
479 dl 1.4 */
480 dl 1.119 private static final int MAXIMUM_CAPACITY = 1 << 30;
481 dl 1.56
482     /**
483 dl 1.119 * The default initial table capacity. Must be a power of 2
484     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
485 dl 1.56 */
486 dl 1.119 private static final int DEFAULT_CAPACITY = 16;
487 dl 1.56
488     /**
489 dl 1.119 * The largest possible (non-power of two) array size.
490     * Needed by toArray and related methods.
491 jsr166 1.59 */
492 dl 1.119 static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
493 tim 1.1
494     /**
495 dl 1.119 * The default concurrency level for this table. Unused but
496     * defined for compatibility with previous versions of this class.
497 dl 1.4 */
498 dl 1.119 private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
499 tim 1.11
500 tim 1.1 /**
501 dl 1.119 * The load factor for this table. Overrides of this value in
502     * constructors affect only the initial table capacity. The
503     * actual floating point value isn't normally used -- it is
504     * simpler to use expressions such as {@code n - (n >>> 2)} for
505     * the associated resizing threshold.
506 dl 1.99 */
507 dl 1.119 private static final float LOAD_FACTOR = 0.75f;
508 dl 1.99
509     /**
510 dl 1.119 * The bin count threshold for using a tree rather than list for a
511 dl 1.222 * bin. Bins are converted to trees when adding an element to a
512 dl 1.224 * bin with at least this many nodes. The value must be greater
513     * than 2, and should be at least 8 to mesh with assumptions in
514     * tree removal about conversion back to plain bins upon
515     * shrinkage.
516 dl 1.222 */
517     static final int TREEIFY_THRESHOLD = 8;
518    
519     /**
520     * The bin count threshold for untreeifying a (split) bin during a
521     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
522     * most 6 to mesh with shrinkage detection under removal.
523     */
524     static final int UNTREEIFY_THRESHOLD = 6;
525    
526     /**
527     * The smallest table capacity for which bins may be treeified.
528     * (Otherwise the table is resized if too many nodes in a bin.)
529 dl 1.224 * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
530     * conflicts between resizing and treeification thresholds.
531 dl 1.119 */
532 dl 1.222 static final int MIN_TREEIFY_CAPACITY = 64;
533 dl 1.119
534 dl 1.149 /**
535     * Minimum number of rebinnings per transfer step. Ranges are
536     * subdivided to allow multiple resizer threads. This value
537     * serves as a lower bound to avoid resizers encountering
538     * excessive memory contention. The value should be at least
539     * DEFAULT_CAPACITY.
540     */
541     private static final int MIN_TRANSFER_STRIDE = 16;
542    
543 dl 1.252 /**
544     * The number of bits used for generation stamp in sizeCtl.
545     * Must be at least 6 for 32bit arrays.
546     */
547 jsr166 1.282 private static final int RESIZE_STAMP_BITS = 16;
548 dl 1.252
549     /**
550     * The maximum number of threads that can help resize.
551     * Must fit in 32 - RESIZE_STAMP_BITS bits.
552     */
553     private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
554    
555     /**
556     * The bit shift for recording size stamp in sizeCtl.
557     */
558     private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
559    
560 dl 1.119 /*
561 dl 1.149 * Encodings for Node hash fields. See above for explanation.
562 dl 1.46 */
563 dl 1.233 static final int MOVED = -1; // hash for forwarding nodes
564     static final int TREEBIN = -2; // hash for roots of trees
565     static final int RESERVED = -3; // hash for transient reservations
566 dl 1.149 static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
567    
568     /** Number of CPUS, to place bounds on some sizings */
569     static final int NCPU = Runtime.getRuntime().availableProcessors();
570    
571 jsr166 1.287 /**
572     * Serialized pseudo-fields, provided only for jdk7 compatibility.
573     * @serialField segments Segment[]
574     * The segments, each of which is a specialized hash table.
575     * @serialField segmentMask int
576     * Mask value for indexing into segments. The upper bits of a
577     * key's hash code are used to choose the segment.
578     * @serialField segmentShift int
579     * Shift value for indexing within segments.
580     */
581 dl 1.208 private static final ObjectStreamField[] serialPersistentFields = {
582 dl 1.209 new ObjectStreamField("segments", Segment[].class),
583     new ObjectStreamField("segmentMask", Integer.TYPE),
584 jsr166 1.283 new ObjectStreamField("segmentShift", Integer.TYPE),
585 dl 1.208 };
586    
587 dl 1.222 /* ---------------- Nodes -------------- */
588    
589     /**
590     * Key-value entry. This class is never exported out as a
591     * user-mutable Map.Entry (i.e., one supporting setValue; see
592     * MapEntry below), but can be used for read-only traversals used
593 jsr166 1.232 * in bulk tasks. Subclasses of Node with a negative hash field
594 dl 1.224 * are special, and contain null keys and values (but are never
595     * exported). Otherwise, keys and vals are never null.
596 dl 1.222 */
597     static class Node<K,V> implements Map.Entry<K,V> {
598     final int hash;
599     final K key;
600     volatile V val;
601 dl 1.233 volatile Node<K,V> next;
602 dl 1.222
603 jsr166 1.290 Node(int hash, K key, V val) {
604 dl 1.222 this.hash = hash;
605     this.key = key;
606     this.val = val;
607 jsr166 1.290 }
608    
609     Node(int hash, K key, V val, Node<K,V> next) {
610     this(hash, key, val);
611 dl 1.222 this.next = next;
612     }
613    
614 jsr166 1.268 public final K getKey() { return key; }
615     public final V getValue() { return val; }
616     public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
617     public final String toString() {
618     return Helpers.mapEntryToString(key, val);
619 jsr166 1.265 }
620 dl 1.222 public final V setValue(V value) {
621     throw new UnsupportedOperationException();
622     }
623    
624     public final boolean equals(Object o) {
625     Object k, v, u; Map.Entry<?,?> e;
626     return ((o instanceof Map.Entry) &&
627     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
628     (v = e.getValue()) != null &&
629     (k == key || k.equals(key)) &&
630     (v == (u = val) || v.equals(u)));
631     }
632    
633 dl 1.224 /**
634     * Virtualized support for map.get(); overridden in subclasses.
635     */
636 dl 1.222 Node<K,V> find(int h, Object k) {
637     Node<K,V> e = this;
638     if (k != null) {
639     do {
640     K ek;
641     if (e.hash == h &&
642     ((ek = e.key) == k || (ek != null && k.equals(ek))))
643     return e;
644     } while ((e = e.next) != null);
645     }
646     return null;
647     }
648     }
649    
650     /* ---------------- Static utilities -------------- */
651    
652     /**
653     * Spreads (XORs) higher bits of hash to lower and also forces top
654     * bit to 0. Because the table uses power-of-two masking, sets of
655     * hashes that vary only in bits above the current mask will
656     * always collide. (Among known examples are sets of Float keys
657     * holding consecutive whole numbers in small tables.) So we
658     * apply a transform that spreads the impact of higher bits
659     * downward. There is a tradeoff between speed, utility, and
660     * quality of bit-spreading. Because many common sets of hashes
661     * are already reasonably distributed (so don't benefit from
662     * spreading), and because we use trees to handle large sets of
663     * collisions in bins, we just XOR some shifted bits in the
664     * cheapest possible way to reduce systematic lossage, as well as
665     * to incorporate impact of the highest bits that would otherwise
666     * never be used in index calculations because of table bounds.
667     */
668     static final int spread(int h) {
669     return (h ^ (h >>> 16)) & HASH_BITS;
670     }
671    
672     /**
673     * Returns a power of two table size for the given desired capacity.
674     * See Hackers Delight, sec 3.2
675     */
676     private static final int tableSizeFor(int c) {
677     int n = c - 1;
678     n |= n >>> 1;
679     n |= n >>> 2;
680     n |= n >>> 4;
681     n |= n >>> 8;
682     n |= n >>> 16;
683     return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
684     }
685    
686     /**
687     * Returns x's Class if it is of the form "class C implements
688     * Comparable<C>", else null.
689     */
690     static Class<?> comparableClassFor(Object x) {
691     if (x instanceof Comparable) {
692     Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
693     if ((c = x.getClass()) == String.class) // bypass checks
694     return c;
695     if ((ts = c.getGenericInterfaces()) != null) {
696     for (int i = 0; i < ts.length; ++i) {
697     if (((t = ts[i]) instanceof ParameterizedType) &&
698     ((p = (ParameterizedType)t).getRawType() ==
699     Comparable.class) &&
700     (as = p.getActualTypeArguments()) != null &&
701     as.length == 1 && as[0] == c) // type arg is c
702     return c;
703     }
704     }
705     }
706     return null;
707     }
708    
709 dl 1.210 /**
710 dl 1.222 * Returns k.compareTo(x) if x matches kc (k's screened comparable
711     * class), else 0.
712     */
713     @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
714     static int compareComparables(Class<?> kc, Object k, Object x) {
715     return (x == null || x.getClass() != kc ? 0 :
716     ((Comparable)k).compareTo(x));
717     }
718    
    /* ---------------- Table element access -------------- */

    /*
     * Atomic access methods are used for table elements as well as
     * elements of in-progress next table while resizing.  All uses of
     * the tab arguments must be null checked by callers.  All callers
     * also paranoically precheck that tab's length is not zero (or an
     * equivalent check), thus ensuring that any index argument taking
     * the form of a hash value anded with (length - 1) is a valid
     * index.  Note that, to be correct wrt arbitrary concurrency
     * errors by users, these checks must operate on local variables,
     * which accounts for some odd-looking inline assignments below.
     * Note that calls to setTabAt always occur within locked regions,
     * and so require only release ordering.
     */

    // Acquire-read of tab[i]; pairs with setTabAt's release write.
    @SuppressWarnings("unchecked")
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        return (Node<K,V>)U.getObjectAcquire(tab, ((long)i << ASHIFT) + ABASE);
    }

    // CAS of tab[i] from c to v; used e.g. to install the first node of a bin.
    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }

    // Release-write of tab[i]; callers hold the bin lock (see comment above).
    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        U.putObjectRelease(tab, ((long)i << ASHIFT) + ABASE, v);
    }
748    
749 dl 1.4 /* ---------------- Fields -------------- */
750 tim 1.1
751     /**
752 dl 1.119 * The array of bins. Lazily initialized upon first insertion.
753     * Size is always a power of two. Accessed directly by iterators.
754 jsr166 1.59 */
755 dl 1.210 transient volatile Node<K,V>[] table;
756 tim 1.1
757     /**
758 dl 1.149 * The next table to use; non-null only while resizing.
759 jsr166 1.59 */
760 dl 1.210 private transient volatile Node<K,V>[] nextTable;
761 dl 1.149
762     /**
763     * Base counter value, used mainly when there is no contention,
764     * but also as a fallback during table initialization
765     * races. Updated via CAS.
766     */
767     private transient volatile long baseCount;
768 tim 1.1
769     /**
770 dl 1.119 * Table initialization and resizing control. When negative, the
771 dl 1.149 * table is being initialized or resized: -1 for initialization,
772     * else -(1 + the number of active resizing threads). Otherwise,
773     * when table is null, holds the initial table size to use upon
774     * creation, or 0 for default. After initialization, holds the
775     * next element count value upon which to resize the table.
776 tim 1.1 */
777 dl 1.119 private transient volatile int sizeCtl;
778 dl 1.4
779 dl 1.149 /**
780     * The next table index (plus one) to split while resizing.
781     */
782     private transient volatile int transferIndex;
783    
784     /**
785 dl 1.222 * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
786 dl 1.149 */
787 dl 1.153 private transient volatile int cellsBusy;
788 dl 1.149
789     /**
790     * Table of counter cells. When non-null, size is a power of 2.
791     */
792 dl 1.222 private transient volatile CounterCell[] counterCells;
793 dl 1.149
794 dl 1.119 // views
795 dl 1.137 private transient KeySetView<K,V> keySet;
796 dl 1.142 private transient ValuesView<K,V> values;
797     private transient EntrySetView<K,V> entrySet;
798 dl 1.119
799    
800 dl 1.222 /* ---------------- Public operations -------------- */
801    
802     /**
803     * Creates a new, empty map with the default initial table size (16).
804 dl 1.119 */
805 dl 1.222 public ConcurrentHashMap() {
806     }
807 dl 1.119
808 dl 1.222 /**
809     * Creates a new, empty map with an initial table size
810     * accommodating the specified number of elements without the need
811     * to dynamically resize.
812     *
813     * @param initialCapacity The implementation performs internal
814     * sizing to accommodate this many elements.
815     * @throws IllegalArgumentException if the initial capacity of
816     * elements is negative
817     */
818     public ConcurrentHashMap(int initialCapacity) {
819     if (initialCapacity < 0)
820     throw new IllegalArgumentException();
821     int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
822     MAXIMUM_CAPACITY :
823     tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
824     this.sizeCtl = cap;
825 dl 1.119 }
826    
827 dl 1.222 /**
828     * Creates a new map with the same mappings as the given map.
829     *
830     * @param m the map
831     */
832     public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
833     this.sizeCtl = DEFAULT_CAPACITY;
834     putAll(m);
835 dl 1.119 }
836    
837 dl 1.222 /**
838     * Creates a new, empty map with an initial table size based on
839     * the given number of elements ({@code initialCapacity}) and
840     * initial table density ({@code loadFactor}).
841     *
842     * @param initialCapacity the initial capacity. The implementation
843     * performs internal sizing to accommodate this many elements,
844     * given the specified load factor.
845     * @param loadFactor the load factor (table density) for
846     * establishing the initial table size
847     * @throws IllegalArgumentException if the initial capacity of
848     * elements is negative or the load factor is nonpositive
849     *
850     * @since 1.6
851     */
852     public ConcurrentHashMap(int initialCapacity, float loadFactor) {
853     this(initialCapacity, loadFactor, 1);
854 dl 1.119 }
855    
856 dl 1.99 /**
857 dl 1.222 * Creates a new, empty map with an initial table size based on
858     * the given number of elements ({@code initialCapacity}), table
859     * density ({@code loadFactor}), and number of concurrently
860     * updating threads ({@code concurrencyLevel}).
861     *
862     * @param initialCapacity the initial capacity. The implementation
863     * performs internal sizing to accommodate this many elements,
864     * given the specified load factor.
865     * @param loadFactor the load factor (table density) for
866     * establishing the initial table size
867     * @param concurrencyLevel the estimated number of concurrently
868     * updating threads. The implementation may use this value as
869     * a sizing hint.
870     * @throws IllegalArgumentException if the initial capacity is
871     * negative or the load factor or concurrencyLevel are
872     * nonpositive
873 dl 1.99 */
874 dl 1.222 public ConcurrentHashMap(int initialCapacity,
875     float loadFactor, int concurrencyLevel) {
876     if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
877     throw new IllegalArgumentException();
878     if (initialCapacity < concurrencyLevel) // Use at least as many bins
879     initialCapacity = concurrencyLevel; // as estimated threads
880     long size = (long)(1.0 + (long)initialCapacity / loadFactor);
881     int cap = (size >= (long)MAXIMUM_CAPACITY) ?
882     MAXIMUM_CAPACITY : tableSizeFor((int)size);
883     this.sizeCtl = cap;
884     }
885 dl 1.99
886 dl 1.222 // Original (since JDK1.2) Map methods
887 dl 1.210
888 dl 1.222 /**
889     * {@inheritDoc}
890     */
891     public int size() {
892     long n = sumCount();
893     return ((n < 0L) ? 0 :
894     (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
895     (int)n);
896     }
897 dl 1.210
898 dl 1.222 /**
899     * {@inheritDoc}
900     */
901     public boolean isEmpty() {
902     return sumCount() <= 0L; // ignore transient negative values
903 dl 1.210 }
904    
905     /**
906 dl 1.222 * Returns the value to which the specified key is mapped,
907     * or {@code null} if this map contains no mapping for the key.
908     *
909     * <p>More formally, if this map contains a mapping from a key
910     * {@code k} to a value {@code v} such that {@code key.equals(k)},
911     * then this method returns {@code v}; otherwise it returns
912     * {@code null}. (There can be at most one such mapping.)
913     *
914     * @throws NullPointerException if the specified key is null
915 dl 1.210 */
916 dl 1.222 public V get(Object key) {
917     Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
918     int h = spread(key.hashCode());
919     if ((tab = table) != null && (n = tab.length) > 0 &&
920     (e = tabAt(tab, (n - 1) & h)) != null) {
921     if ((eh = e.hash) == h) {
922     if ((ek = e.key) == key || (ek != null && key.equals(ek)))
923     return e.val;
924     }
925     else if (eh < 0)
926     return (p = e.find(h, key)) != null ? p.val : null;
927     while ((e = e.next) != null) {
928     if (e.hash == h &&
929     ((ek = e.key) == key || (ek != null && key.equals(ek))))
930     return e.val;
931     }
932 dl 1.210 }
933 dl 1.222 return null;
934 dl 1.99 }
935    
936     /**
937 dl 1.222 * Tests if the specified object is a key in this table.
938     *
939     * @param key possible key
940     * @return {@code true} if and only if the specified object
941     * is a key in this table, as determined by the
942     * {@code equals} method; {@code false} otherwise
943     * @throws NullPointerException if the specified key is null
944 dl 1.99 */
945 dl 1.222 public boolean containsKey(Object key) {
946     return get(key) != null;
947 dl 1.99 }
948 tim 1.1
949 dl 1.201 /**
950 dl 1.222 * Returns {@code true} if this map maps one or more keys to the
951     * specified value. Note: This method may require a full traversal
952     * of the map, and is much slower than method {@code containsKey}.
953     *
954     * @param value value whose presence in this map is to be tested
955     * @return {@code true} if this map maps one or more keys to the
956     * specified value
957     * @throws NullPointerException if the specified value is null
958     */
959     public boolean containsValue(Object value) {
960     if (value == null)
961     throw new NullPointerException();
962     Node<K,V>[] t;
963     if ((t = table) != null) {
964     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
965     for (Node<K,V> p; (p = it.advance()) != null; ) {
966     V v;
967     if ((v = p.val) == value || (v != null && value.equals(v)))
968     return true;
969 dl 1.201 }
970     }
971 dl 1.222 return false;
972 dl 1.201 }
973    
974 tim 1.1 /**
975 dl 1.222 * Maps the specified key to the specified value in this table.
976     * Neither the key nor the value can be null.
977 dl 1.119 *
978 dl 1.222 * <p>The value can be retrieved by calling the {@code get} method
979     * with a key that is equal to the original key.
980 dl 1.119 *
981 dl 1.222 * @param key key with which the specified value is to be associated
982     * @param value value to be associated with the specified key
983     * @return the previous value associated with {@code key}, or
984     * {@code null} if there was no mapping for {@code key}
985     * @throws NullPointerException if the specified key or value is null
986 dl 1.119 */
987 dl 1.222 public V put(K key, V value) {
988     return putVal(key, value, false);
989     }
990 dl 1.119
991 dl 1.222 /** Implementation for put and putIfAbsent */
992     final V putVal(K key, V value, boolean onlyIfAbsent) {
993     if (key == null || value == null) throw new NullPointerException();
994     int hash = spread(key.hashCode());
995     int binCount = 0;
996     for (Node<K,V>[] tab = table;;) {
997     Node<K,V> f; int n, i, fh;
998     if (tab == null || (n = tab.length) == 0)
999     tab = initTable();
1000     else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
1001 jsr166 1.290 if (casTabAt(tab, i, null, new Node<K,V>(hash, key, value)))
1002 dl 1.222 break; // no lock when adding to empty bin
1003 dl 1.119 }
1004 dl 1.222 else if ((fh = f.hash) == MOVED)
1005     tab = helpTransfer(tab, f);
1006     else {
1007     V oldVal = null;
1008     synchronized (f) {
1009     if (tabAt(tab, i) == f) {
1010     if (fh >= 0) {
1011     binCount = 1;
1012     for (Node<K,V> e = f;; ++binCount) {
1013     K ek;
1014     if (e.hash == hash &&
1015     ((ek = e.key) == key ||
1016     (ek != null && key.equals(ek)))) {
1017     oldVal = e.val;
1018     if (!onlyIfAbsent)
1019     e.val = value;
1020     break;
1021     }
1022     Node<K,V> pred = e;
1023     if ((e = e.next) == null) {
1024 jsr166 1.290 pred.next = new Node<K,V>(hash, key, value);
1025 dl 1.222 break;
1026     }
1027     }
1028     }
1029     else if (f instanceof TreeBin) {
1030     Node<K,V> p;
1031     binCount = 2;
1032     if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
1033     value)) != null) {
1034     oldVal = p.val;
1035     if (!onlyIfAbsent)
1036     p.val = value;
1037     }
1038     }
1039 dl 1.259 else if (f instanceof ReservationNode)
1040     throw new IllegalStateException("Recursive update");
1041 dl 1.99 }
1042     }
1043 dl 1.222 if (binCount != 0) {
1044     if (binCount >= TREEIFY_THRESHOLD)
1045     treeifyBin(tab, i);
1046     if (oldVal != null)
1047     return oldVal;
1048 dl 1.99 break;
1049     }
1050     }
1051     }
1052 dl 1.222 addCount(1L, binCount);
1053     return null;
1054     }
1055    
1056     /**
1057     * Copies all of the mappings from the specified map to this one.
1058     * These mappings replace any mappings that this map had for any of the
1059     * keys currently in the specified map.
1060     *
1061     * @param m mappings to be stored in this map
1062     */
1063     public void putAll(Map<? extends K, ? extends V> m) {
1064     tryPresize(m.size());
1065     for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1066     putVal(e.getKey(), e.getValue(), false);
1067     }
1068 dl 1.99
1069 dl 1.222 /**
1070     * Removes the key (and its corresponding value) from this map.
1071     * This method does nothing if the key is not in the map.
1072     *
1073     * @param key the key that needs to be removed
1074     * @return the previous value associated with {@code key}, or
1075     * {@code null} if there was no mapping for {@code key}
1076     * @throws NullPointerException if the specified key is null
1077     */
1078     public V remove(Object key) {
1079     return replaceNode(key, null, null);
1080     }
1081 dl 1.99
1082 dl 1.222 /**
1083     * Implementation for the four public remove/replace methods:
1084     * Replaces node value with v, conditional upon match of cv if
1085     * non-null. If resulting value is null, delete.
1086     */
1087     final V replaceNode(Object key, V value, Object cv) {
1088     int hash = spread(key.hashCode());
1089     for (Node<K,V>[] tab = table;;) {
1090     Node<K,V> f; int n, i, fh;
1091     if (tab == null || (n = tab.length) == 0 ||
1092     (f = tabAt(tab, i = (n - 1) & hash)) == null)
1093     break;
1094     else if ((fh = f.hash) == MOVED)
1095     tab = helpTransfer(tab, f);
1096     else {
1097     V oldVal = null;
1098     boolean validated = false;
1099     synchronized (f) {
1100     if (tabAt(tab, i) == f) {
1101     if (fh >= 0) {
1102     validated = true;
1103     for (Node<K,V> e = f, pred = null;;) {
1104     K ek;
1105     if (e.hash == hash &&
1106     ((ek = e.key) == key ||
1107     (ek != null && key.equals(ek)))) {
1108     V ev = e.val;
1109     if (cv == null || cv == ev ||
1110     (ev != null && cv.equals(ev))) {
1111     oldVal = ev;
1112     if (value != null)
1113     e.val = value;
1114     else if (pred != null)
1115     pred.next = e.next;
1116     else
1117     setTabAt(tab, i, e.next);
1118     }
1119     break;
1120 dl 1.119 }
1121 dl 1.222 pred = e;
1122     if ((e = e.next) == null)
1123     break;
1124 dl 1.119 }
1125     }
1126 dl 1.222 else if (f instanceof TreeBin) {
1127     validated = true;
1128     TreeBin<K,V> t = (TreeBin<K,V>)f;
1129     TreeNode<K,V> r, p;
1130     if ((r = t.root) != null &&
1131     (p = r.findTreeNode(hash, key, null)) != null) {
1132     V pv = p.val;
1133     if (cv == null || cv == pv ||
1134     (pv != null && cv.equals(pv))) {
1135     oldVal = pv;
1136     if (value != null)
1137     p.val = value;
1138     else if (t.removeTreeNode(p))
1139     setTabAt(tab, i, untreeify(t.first));
1140 dl 1.119 }
1141     }
1142 dl 1.99 }
1143 dl 1.259 else if (f instanceof ReservationNode)
1144     throw new IllegalStateException("Recursive update");
1145 dl 1.99 }
1146     }
1147 dl 1.222 if (validated) {
1148     if (oldVal != null) {
1149     if (value == null)
1150     addCount(-1L, -1);
1151     return oldVal;
1152     }
1153     break;
1154     }
1155 dl 1.99 }
1156     }
1157 dl 1.222 return null;
1158     }
1159 dl 1.45
1160 dl 1.222 /**
1161     * Removes all of the mappings from this map.
1162     */
1163     public void clear() {
1164     long delta = 0L; // negative number of deletions
1165     int i = 0;
1166     Node<K,V>[] tab = table;
1167     while (tab != null && i < tab.length) {
1168     int fh;
1169     Node<K,V> f = tabAt(tab, i);
1170     if (f == null)
1171     ++i;
1172     else if ((fh = f.hash) == MOVED) {
1173     tab = helpTransfer(tab, f);
1174     i = 0; // restart
1175 dl 1.217 }
1176 dl 1.222 else {
1177     synchronized (f) {
1178     if (tabAt(tab, i) == f) {
1179     Node<K,V> p = (fh >= 0 ? f :
1180     (f instanceof TreeBin) ?
1181     ((TreeBin<K,V>)f).first : null);
1182     while (p != null) {
1183     --delta;
1184     p = p.next;
1185 dl 1.119 }
1186 dl 1.222 setTabAt(tab, i++, null);
1187 dl 1.119 }
1188 dl 1.45 }
1189 dl 1.4 }
1190 dl 1.217 }
1191 dl 1.222 if (delta != 0L)
1192     addCount(delta, -1);
1193     }
1194 dl 1.217
1195 dl 1.222 /**
1196     * Returns a {@link Set} view of the keys contained in this map.
1197     * The set is backed by the map, so changes to the map are
1198     * reflected in the set, and vice-versa. The set supports element
1199     * removal, which removes the corresponding mapping from this map,
1200     * via the {@code Iterator.remove}, {@code Set.remove},
1201     * {@code removeAll}, {@code retainAll}, and {@code clear}
1202     * operations. It does not support the {@code add} or
1203     * {@code addAll} operations.
1204     *
1205 jsr166 1.242 * <p>The view's iterators and spliterators are
1206     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1207 dl 1.222 *
1208 jsr166 1.241 * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1209     * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1210     *
1211 dl 1.222 * @return the set view
1212     */
1213     public KeySetView<K,V> keySet() {
1214     KeySetView<K,V> ks;
1215 jsr166 1.292 if ((ks = keySet) != null) return ks;
1216     return keySet = new KeySetView<K,V>(this, null);
1217 dl 1.222 }
1218 dl 1.217
1219 dl 1.222 /**
1220     * Returns a {@link Collection} view of the values contained in this map.
1221     * The collection is backed by the map, so changes to the map are
1222     * reflected in the collection, and vice-versa. The collection
1223     * supports element removal, which removes the corresponding
1224     * mapping from this map, via the {@code Iterator.remove},
1225     * {@code Collection.remove}, {@code removeAll},
1226     * {@code retainAll}, and {@code clear} operations. It does not
1227     * support the {@code add} or {@code addAll} operations.
1228     *
1229 jsr166 1.242 * <p>The view's iterators and spliterators are
1230     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1231 dl 1.222 *
1232 jsr166 1.241 * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}
1233     * and {@link Spliterator#NONNULL}.
1234     *
1235 dl 1.222 * @return the collection view
1236     */
1237     public Collection<V> values() {
1238     ValuesView<K,V> vs;
1239 jsr166 1.292 if ((vs = values) != null) return vs;
1240     return values = new ValuesView<K,V>(this);
1241 tim 1.1 }
1242    
1243 dl 1.99 /**
1244 dl 1.222 * Returns a {@link Set} view of the mappings contained in this map.
1245     * The set is backed by the map, so changes to the map are
1246     * reflected in the set, and vice-versa. The set supports element
1247     * removal, which removes the corresponding mapping from the map,
1248     * via the {@code Iterator.remove}, {@code Set.remove},
1249     * {@code removeAll}, {@code retainAll}, and {@code clear}
1250     * operations.
1251     *
1252 jsr166 1.242 * <p>The view's iterators and spliterators are
1253     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1254 dl 1.222 *
1255 jsr166 1.241 * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1256     * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1257     *
1258 dl 1.222 * @return the set view
1259 dl 1.119 */
1260 dl 1.222 public Set<Map.Entry<K,V>> entrySet() {
1261     EntrySetView<K,V> es;
1262 jsr166 1.292 if ((es = entrySet) != null) return es;
1263     return entrySet = new EntrySetView<K,V>(this);
1264 dl 1.99 }
1265    
1266     /**
1267 dl 1.222 * Returns the hash code value for this {@link Map}, i.e.,
1268     * the sum of, for each key-value pair in the map,
1269     * {@code key.hashCode() ^ value.hashCode()}.
1270     *
1271     * @return the hash code value for this map
1272 dl 1.119 */
1273 dl 1.222 public int hashCode() {
1274     int h = 0;
1275     Node<K,V>[] t;
1276     if ((t = table) != null) {
1277     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1278     for (Node<K,V> p; (p = it.advance()) != null; )
1279     h += p.key.hashCode() ^ p.val.hashCode();
1280 dl 1.119 }
1281 dl 1.222 return h;
1282 dl 1.99 }
1283 tim 1.11
1284 dl 1.222 /**
1285     * Returns a string representation of this map. The string
1286     * representation consists of a list of key-value mappings (in no
1287     * particular order) enclosed in braces ("{@code {}}"). Adjacent
1288     * mappings are separated by the characters {@code ", "} (comma
1289     * and space). Each key-value mapping is rendered as the key
1290     * followed by an equals sign ("{@code =}") followed by the
1291     * associated value.
1292     *
1293     * @return a string representation of this map
1294     */
1295     public String toString() {
1296     Node<K,V>[] t;
1297     int f = (t = table) == null ? 0 : t.length;
1298     Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1299     StringBuilder sb = new StringBuilder();
1300     sb.append('{');
1301     Node<K,V> p;
1302     if ((p = it.advance()) != null) {
1303 dl 1.210 for (;;) {
1304 dl 1.222 K k = p.key;
1305     V v = p.val;
1306     sb.append(k == this ? "(this Map)" : k);
1307     sb.append('=');
1308     sb.append(v == this ? "(this Map)" : v);
1309     if ((p = it.advance()) == null)
1310 dl 1.210 break;
1311 dl 1.222 sb.append(',').append(' ');
1312 dl 1.119 }
1313     }
1314 dl 1.222 return sb.append('}').toString();
1315 tim 1.1 }
1316    
1317     /**
1318 dl 1.222 * Compares the specified object with this map for equality.
1319     * Returns {@code true} if the given object is a map with the same
1320     * mappings as this map. This operation may return misleading
1321     * results if either map is concurrently modified during execution
1322     * of this method.
1323     *
1324     * @param o object to be compared for equality with this map
1325     * @return {@code true} if the specified object is equal to this map
1326 dl 1.119 */
1327 dl 1.222 public boolean equals(Object o) {
1328     if (o != this) {
1329     if (!(o instanceof Map))
1330     return false;
1331     Map<?,?> m = (Map<?,?>) o;
1332     Node<K,V>[] t;
1333     int f = (t = table) == null ? 0 : t.length;
1334     Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1335     for (Node<K,V> p; (p = it.advance()) != null; ) {
1336     V val = p.val;
1337     Object v = m.get(p.key);
1338     if (v == null || (v != val && !v.equals(val)))
1339     return false;
1340 dl 1.119 }
1341 dl 1.222 for (Map.Entry<?,?> e : m.entrySet()) {
1342     Object mk, mv, v;
1343     if ((mk = e.getKey()) == null ||
1344     (mv = e.getValue()) == null ||
1345     (v = get(mk)) == null ||
1346     (mv != v && !mv.equals(v)))
1347     return false;
1348     }
1349     }
1350     return true;
1351     }
1352    
1353     /**
1354     * Stripped-down version of helper class used in previous version,
1355 jsr166 1.285 * declared for the sake of serialization compatibility.
1356 dl 1.222 */
1357     static class Segment<K,V> extends ReentrantLock implements Serializable {
1358     private static final long serialVersionUID = 2249069246763182397L;
1359     final float loadFactor;
1360     Segment(float lf) { this.loadFactor = lf; }
1361     }
1362    
1363     /**
1364     * Saves the state of the {@code ConcurrentHashMap} instance to a
1365     * stream (i.e., serializes it).
1366     * @param s the stream
1367 jsr166 1.238 * @throws java.io.IOException if an I/O error occurs
1368 dl 1.222 * @serialData
1369 jsr166 1.287 * the serialized fields, followed by the key (Object) and value
1370     * (Object) for each key-value mapping, followed by a null pair.
1371 dl 1.222 * The key-value mappings are emitted in no particular order.
1372     */
1373     private void writeObject(java.io.ObjectOutputStream s)
1374     throws java.io.IOException {
1375     // For serialization compatibility
1376     // Emulate segment calculation from previous version of this class
1377     int sshift = 0;
1378     int ssize = 1;
1379     while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
1380     ++sshift;
1381     ssize <<= 1;
1382     }
1383     int segmentShift = 32 - sshift;
1384     int segmentMask = ssize - 1;
1385 dl 1.246 @SuppressWarnings("unchecked")
1386     Segment<K,V>[] segments = (Segment<K,V>[])
1387 dl 1.222 new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
1388     for (int i = 0; i < segments.length; ++i)
1389     segments[i] = new Segment<K,V>(LOAD_FACTOR);
1390 jsr166 1.270 java.io.ObjectOutputStream.PutField streamFields = s.putFields();
1391     streamFields.put("segments", segments);
1392     streamFields.put("segmentShift", segmentShift);
1393     streamFields.put("segmentMask", segmentMask);
1394 dl 1.222 s.writeFields();
1395    
1396     Node<K,V>[] t;
1397     if ((t = table) != null) {
1398     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1399     for (Node<K,V> p; (p = it.advance()) != null; ) {
1400     s.writeObject(p.key);
1401     s.writeObject(p.val);
1402     }
1403     }
1404     s.writeObject(null);
1405     s.writeObject(null);
1406     }
1407    
1408     /**
1409     * Reconstitutes the instance from a stream (that is, deserializes it).
1410     * @param s the stream
1411 jsr166 1.238 * @throws ClassNotFoundException if the class of a serialized object
1412     * could not be found
1413     * @throws java.io.IOException if an I/O error occurs
1414 dl 1.222 */
1415     private void readObject(java.io.ObjectInputStream s)
1416     throws java.io.IOException, ClassNotFoundException {
1417     /*
1418     * To improve performance in typical cases, we create nodes
1419     * while reading, then place in table once size is known.
1420     * However, we must also validate uniqueness and deal with
1421     * overpopulated bins while doing so, which requires
1422     * specialized versions of putVal mechanics.
1423     */
1424     sizeCtl = -1; // force exclusion for table construction
1425     s.defaultReadObject();
1426     long size = 0L;
1427     Node<K,V> p = null;
1428     for (;;) {
1429 dl 1.246 @SuppressWarnings("unchecked")
1430     K k = (K) s.readObject();
1431     @SuppressWarnings("unchecked")
1432     V v = (V) s.readObject();
1433 dl 1.222 if (k != null && v != null) {
1434     p = new Node<K,V>(spread(k.hashCode()), k, v, p);
1435     ++size;
1436     }
1437     else
1438     break;
1439     }
1440     if (size == 0L)
1441     sizeCtl = 0;
1442     else {
1443     int n;
1444     if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
1445     n = MAXIMUM_CAPACITY;
1446 dl 1.149 else {
1447 dl 1.222 int sz = (int)size;
1448     n = tableSizeFor(sz + (sz >>> 1) + 1);
1449     }
1450 jsr166 1.245 @SuppressWarnings("unchecked")
1451 dl 1.246 Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
1452 dl 1.222 int mask = n - 1;
1453     long added = 0L;
1454     while (p != null) {
1455     boolean insertAtFront;
1456     Node<K,V> next = p.next, first;
1457     int h = p.hash, j = h & mask;
1458     if ((first = tabAt(tab, j)) == null)
1459     insertAtFront = true;
1460     else {
1461     K k = p.key;
1462     if (first.hash < 0) {
1463     TreeBin<K,V> t = (TreeBin<K,V>)first;
1464     if (t.putTreeVal(h, k, p.val) == null)
1465     ++added;
1466     insertAtFront = false;
1467     }
1468     else {
1469     int binCount = 0;
1470     insertAtFront = true;
1471     Node<K,V> q; K qk;
1472     for (q = first; q != null; q = q.next) {
1473     if (q.hash == h &&
1474     ((qk = q.key) == k ||
1475     (qk != null && k.equals(qk)))) {
1476     insertAtFront = false;
1477 dl 1.119 break;
1478     }
1479 dl 1.222 ++binCount;
1480     }
1481     if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
1482     insertAtFront = false;
1483     ++added;
1484     p.next = first;
1485     TreeNode<K,V> hd = null, tl = null;
1486     for (q = p; q != null; q = q.next) {
1487     TreeNode<K,V> t = new TreeNode<K,V>
1488     (q.hash, q.key, q.val, null, null);
1489     if ((t.prev = tl) == null)
1490     hd = t;
1491     else
1492     tl.next = t;
1493     tl = t;
1494     }
1495     setTabAt(tab, j, new TreeBin<K,V>(hd));
1496 dl 1.119 }
1497     }
1498     }
1499 dl 1.222 if (insertAtFront) {
1500     ++added;
1501     p.next = first;
1502     setTabAt(tab, j, p);
1503 dl 1.119 }
1504 dl 1.222 p = next;
1505 dl 1.119 }
1506 dl 1.222 table = tab;
1507     sizeCtl = n - (n >>> 2);
1508     baseCount = added;
1509 dl 1.119 }
1510 dl 1.55 }
1511    
1512 dl 1.222 // ConcurrentMap methods
1513    
1514     /**
1515     * {@inheritDoc}
1516     *
1517     * @return the previous value associated with the specified key,
1518     * or {@code null} if there was no mapping for the key
1519     * @throws NullPointerException if the specified key or value is null
1520     */
1521     public V putIfAbsent(K key, V value) {
1522     return putVal(key, value, true);
1523     }
1524    
1525     /**
1526     * {@inheritDoc}
1527     *
1528     * @throws NullPointerException if the specified key is null
1529     */
1530     public boolean remove(Object key, Object value) {
1531     if (key == null)
1532     throw new NullPointerException();
1533     return value != null && replaceNode(key, null, value) != null;
1534     }
1535    
1536     /**
1537     * {@inheritDoc}
1538     *
1539     * @throws NullPointerException if any of the arguments are null
1540     */
1541     public boolean replace(K key, V oldValue, V newValue) {
1542     if (key == null || oldValue == null || newValue == null)
1543     throw new NullPointerException();
1544     return replaceNode(key, newValue, oldValue) != null;
1545     }
1546    
1547     /**
1548     * {@inheritDoc}
1549     *
1550     * @return the previous value associated with the specified key,
1551     * or {@code null} if there was no mapping for the key
1552     * @throws NullPointerException if the specified key or value is null
1553     */
1554     public V replace(K key, V value) {
1555     if (key == null || value == null)
1556     throw new NullPointerException();
1557     return replaceNode(key, value, null);
1558     }
1559    
1560     // Overrides of JDK8+ Map extension method defaults
1561    
1562     /**
1563     * Returns the value to which the specified key is mapped, or the
1564     * given default value if this map contains no mapping for the
1565     * key.
1566     *
1567     * @param key the key whose associated value is to be returned
1568     * @param defaultValue the value to return if this map contains
1569     * no mapping for the given key
1570     * @return the mapping for the key, if present; else the default value
1571     * @throws NullPointerException if the specified key is null
1572 tim 1.1 */
1573 dl 1.222 public V getOrDefault(Object key, V defaultValue) {
1574     V v;
1575     return (v = get(key)) == null ? defaultValue : v;
1576     }
1577 tim 1.1
1578 dl 1.222 public void forEach(BiConsumer<? super K, ? super V> action) {
1579     if (action == null) throw new NullPointerException();
1580     Node<K,V>[] t;
1581     if ((t = table) != null) {
1582     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1583     for (Node<K,V> p; (p = it.advance()) != null; ) {
1584     action.accept(p.key, p.val);
1585 dl 1.99 }
1586 dl 1.222 }
1587     }
1588    
    /**
     * Replaces each entry's value with the result of invoking the given
     * function on that entry, using a weakly consistent traversal.
     *
     * @param function the function to apply to each entry
     * @throws NullPointerException if the function or any computed
     * replacement value is null
     */
    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V oldValue = p.val;
                for (K key = p.key;;) {
                    V newValue = function.apply(key, oldValue);
                    if (newValue == null)
                        throw new NullPointerException();
                    // CAS-style retry: replaceNode succeeds only if the
                    // value is still oldValue; otherwise re-read and retry,
                    // giving up if the mapping has disappeared.
                    if (replaceNode(key, newValue, oldValue) != null ||
                        (oldValue = get(key)) == null)
                        break;
                }
            }
        }
    }
1607    
1608 dl 1.222 /**
1609 jsr166 1.285 * Helper method for EntrySetView.removeIf.
1610 dl 1.271 */
1611 jsr166 1.273 boolean removeEntryIf(Predicate<? super Entry<K,V>> function) {
1612 dl 1.271 if (function == null) throw new NullPointerException();
1613     Node<K,V>[] t;
1614     boolean removed = false;
1615     if ((t = table) != null) {
1616     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1617     for (Node<K,V> p; (p = it.advance()) != null; ) {
1618     K k = p.key;
1619     V v = p.val;
1620     Map.Entry<K,V> e = new AbstractMap.SimpleImmutableEntry<>(k, v);
1621     if (function.test(e) && replaceNode(k, null, v) != null)
1622     removed = true;
1623     }
1624     }
1625     return removed;
1626     }
1627    
1628     /**
1629 jsr166 1.285 * Helper method for ValuesView.removeIf.
1630 dl 1.272 */
1631 jsr166 1.276 boolean removeValueIf(Predicate<? super V> function) {
1632 dl 1.272 if (function == null) throw new NullPointerException();
1633     Node<K,V>[] t;
1634     boolean removed = false;
1635     if ((t = table) != null) {
1636     Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1637     for (Node<K,V> p; (p = it.advance()) != null; ) {
1638     K k = p.key;
1639     V v = p.val;
1640     if (function.test(v) && replaceNode(k, null, v) != null)
1641     removed = true;
1642     }
1643     }
1644     return removed;
1645     }
1646    
1647     /**
1648 dl 1.222 * If the specified key is not already associated with a value,
1649     * attempts to compute its value using the given mapping function
1650     * and enters it into this map unless {@code null}. The entire
1651     * method invocation is performed atomically, so the function is
1652     * applied at most once per key. Some attempted update operations
1653     * on this map by other threads may be blocked while computation
1654     * is in progress, so the computation should be short and simple,
1655     * and must not attempt to update any other mappings of this map.
1656     *
1657     * @param key key with which the specified value is to be associated
1658     * @param mappingFunction the function to compute a value
1659     * @return the current (existing or computed) value associated with
1660     * the specified key, or null if the computed value is null
1661     * @throws NullPointerException if the specified key or mappingFunction
1662     * is null
1663     * @throws IllegalStateException if the computation detectably
1664     * attempts a recursive update to this map that would
1665     * otherwise never complete
1666     * @throws RuntimeException or Error if the mappingFunction does so,
1667     * in which case the mapping is left unestablished
1668     */
1669     public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
1670     if (key == null || mappingFunction == null)
1671 dl 1.149 throw new NullPointerException();
1672 dl 1.222 int h = spread(key.hashCode());
1673 dl 1.151 V val = null;
1674 dl 1.222 int binCount = 0;
1675 dl 1.210 for (Node<K,V>[] tab = table;;) {
1676 dl 1.222 Node<K,V> f; int n, i, fh;
1677     if (tab == null || (n = tab.length) == 0)
1678 dl 1.119 tab = initTable();
1679 dl 1.222 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1680     Node<K,V> r = new ReservationNode<K,V>();
1681     synchronized (r) {
1682     if (casTabAt(tab, i, null, r)) {
1683     binCount = 1;
1684     Node<K,V> node = null;
1685 dl 1.149 try {
1686 dl 1.222 if ((val = mappingFunction.apply(key)) != null)
1687 jsr166 1.290 node = new Node<K,V>(h, key, val);
1688 dl 1.149 } finally {
1689 dl 1.222 setTabAt(tab, i, node);
1690 dl 1.119 }
1691     }
1692     }
1693 dl 1.222 if (binCount != 0)
1694 dl 1.119 break;
1695     }
1696 dl 1.222 else if ((fh = f.hash) == MOVED)
1697     tab = helpTransfer(tab, f);
1698 dl 1.119 else {
1699 dl 1.149 boolean added = false;
1700 jsr166 1.150 synchronized (f) {
1701 dl 1.149 if (tabAt(tab, i) == f) {
1702 dl 1.222 if (fh >= 0) {
1703     binCount = 1;
1704     for (Node<K,V> e = f;; ++binCount) {
1705 dl 1.266 K ek;
1706 dl 1.222 if (e.hash == h &&
1707     ((ek = e.key) == key ||
1708     (ek != null && key.equals(ek)))) {
1709     val = e.val;
1710     break;
1711     }
1712     Node<K,V> pred = e;
1713     if ((e = e.next) == null) {
1714     if ((val = mappingFunction.apply(key)) != null) {
1715 dl 1.259 if (pred.next != null)
1716     throw new IllegalStateException("Recursive update");
1717 dl 1.222 added = true;
1718 jsr166 1.290 pred.next = new Node<K,V>(h, key, val);
1719 dl 1.222 }
1720     break;
1721     }
1722 dl 1.149 }
1723 dl 1.222 }
1724     else if (f instanceof TreeBin) {
1725     binCount = 2;
1726     TreeBin<K,V> t = (TreeBin<K,V>)f;
1727     TreeNode<K,V> r, p;
1728     if ((r = t.root) != null &&
1729     (p = r.findTreeNode(h, key, null)) != null)
1730     val = p.val;
1731     else if ((val = mappingFunction.apply(key)) != null) {
1732     added = true;
1733     t.putTreeVal(h, key, val);
1734 dl 1.119 }
1735     }
1736 dl 1.259 else if (f instanceof ReservationNode)
1737     throw new IllegalStateException("Recursive update");
1738 dl 1.119 }
1739 dl 1.149 }
1740 dl 1.222 if (binCount != 0) {
1741     if (binCount >= TREEIFY_THRESHOLD)
1742     treeifyBin(tab, i);
1743 dl 1.149 if (!added)
1744 dl 1.151 return val;
1745 dl 1.149 break;
1746 dl 1.119 }
1747 dl 1.105 }
1748 dl 1.99 }
1749 dl 1.149 if (val != null)
1750 dl 1.222 addCount(1L, binCount);
1751 dl 1.151 return val;
1752 tim 1.1 }
1753    
1754 dl 1.222 /**
1755     * If the value for the specified key is present, attempts to
1756     * compute a new mapping given the key and its current mapped
1757     * value. The entire method invocation is performed atomically.
1758     * Some attempted update operations on this map by other threads
1759     * may be blocked while computation is in progress, so the
1760     * computation should be short and simple, and must not attempt to
1761     * update any other mappings of this map.
1762     *
1763     * @param key key with which a value may be associated
1764     * @param remappingFunction the function to compute a value
1765     * @return the new value associated with the specified key, or null if none
1766     * @throws NullPointerException if the specified key or remappingFunction
1767     * is null
1768     * @throws IllegalStateException if the computation detectably
1769     * attempts a recursive update to this map that would
1770     * otherwise never complete
1771     * @throws RuntimeException or Error if the remappingFunction does so,
1772     * in which case the mapping is unchanged
1773     */
1774     public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1775     if (key == null || remappingFunction == null)
1776 dl 1.149 throw new NullPointerException();
1777 dl 1.222 int h = spread(key.hashCode());
1778 dl 1.151 V val = null;
1779 dl 1.119 int delta = 0;
1780 dl 1.222 int binCount = 0;
1781 dl 1.210 for (Node<K,V>[] tab = table;;) {
1782 dl 1.222 Node<K,V> f; int n, i, fh;
1783     if (tab == null || (n = tab.length) == 0)
1784 dl 1.119 tab = initTable();
1785 dl 1.222 else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
1786     break;
1787     else if ((fh = f.hash) == MOVED)
1788     tab = helpTransfer(tab, f);
1789     else {
1790     synchronized (f) {
1791     if (tabAt(tab, i) == f) {
1792     if (fh >= 0) {
1793     binCount = 1;
1794     for (Node<K,V> e = f, pred = null;; ++binCount) {
1795     K ek;
1796     if (e.hash == h &&
1797     ((ek = e.key) == key ||
1798     (ek != null && key.equals(ek)))) {
1799     val = remappingFunction.apply(key, e.val);
1800     if (val != null)
1801     e.val = val;
1802 dl 1.210 else {
1803 dl 1.222 delta = -1;
1804     Node<K,V> en = e.next;
1805     if (pred != null)
1806     pred.next = en;
1807     else
1808     setTabAt(tab, i, en);
1809 dl 1.210 }
1810 dl 1.222 break;
1811 dl 1.210 }
1812 dl 1.222 pred = e;
1813     if ((e = e.next) == null)
1814     break;
1815 dl 1.119 }
1816     }
1817 dl 1.222 else if (f instanceof TreeBin) {
1818     binCount = 2;
1819     TreeBin<K,V> t = (TreeBin<K,V>)f;
1820     TreeNode<K,V> r, p;
1821     if ((r = t.root) != null &&
1822     (p = r.findTreeNode(h, key, null)) != null) {
1823     val = remappingFunction.apply(key, p.val);
1824 dl 1.119 if (val != null)
1825 dl 1.222 p.val = val;
1826 dl 1.119 else {
1827     delta = -1;
1828 dl 1.222 if (t.removeTreeNode(p))
1829     setTabAt(tab, i, untreeify(t.first));
1830 dl 1.119 }
1831     }
1832     }
1833 dl 1.259 else if (f instanceof ReservationNode)
1834     throw new IllegalStateException("Recursive update");
1835 dl 1.119 }
1836     }
1837 dl 1.222 if (binCount != 0)
1838 dl 1.119 break;
1839     }
1840     }
1841 dl 1.149 if (delta != 0)
1842 dl 1.222 addCount((long)delta, binCount);
1843 dl 1.151 return val;
1844 dl 1.119 }
1845    
1846 dl 1.222 /**
1847     * Attempts to compute a mapping for the specified key and its
1848     * current mapped value (or {@code null} if there is no current
1849     * mapping). The entire method invocation is performed atomically.
1850     * Some attempted update operations on this map by other threads
1851     * may be blocked while computation is in progress, so the
1852     * computation should be short and simple, and must not attempt to
1853     * update any other mappings of this Map.
1854     *
1855     * @param key key with which the specified value is to be associated
1856     * @param remappingFunction the function to compute a value
1857     * @return the new value associated with the specified key, or null if none
1858     * @throws NullPointerException if the specified key or remappingFunction
1859     * is null
1860     * @throws IllegalStateException if the computation detectably
1861     * attempts a recursive update to this map that would
1862     * otherwise never complete
1863     * @throws RuntimeException or Error if the remappingFunction does so,
1864     * in which case the mapping is unchanged
1865     */
1866     public V compute(K key,
1867     BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1868     if (key == null || remappingFunction == null)
1869 dl 1.149 throw new NullPointerException();
1870 dl 1.222 int h = spread(key.hashCode());
1871 dl 1.151 V val = null;
1872 dl 1.119 int delta = 0;
1873 dl 1.222 int binCount = 0;
1874 dl 1.210 for (Node<K,V>[] tab = table;;) {
1875 dl 1.222 Node<K,V> f; int n, i, fh;
1876     if (tab == null || (n = tab.length) == 0)
1877 dl 1.119 tab = initTable();
1878 dl 1.222 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1879     Node<K,V> r = new ReservationNode<K,V>();
1880     synchronized (r) {
1881     if (casTabAt(tab, i, null, r)) {
1882     binCount = 1;
1883     Node<K,V> node = null;
1884     try {
1885     if ((val = remappingFunction.apply(key, null)) != null) {
1886     delta = 1;
1887 jsr166 1.290 node = new Node<K,V>(h, key, val);
1888 dl 1.222 }
1889     } finally {
1890     setTabAt(tab, i, node);
1891     }
1892     }
1893     }
1894     if (binCount != 0)
1895 dl 1.119 break;
1896     }
1897 dl 1.222 else if ((fh = f.hash) == MOVED)
1898     tab = helpTransfer(tab, f);
1899     else {
1900     synchronized (f) {
1901     if (tabAt(tab, i) == f) {
1902     if (fh >= 0) {
1903     binCount = 1;
1904     for (Node<K,V> e = f, pred = null;; ++binCount) {
1905     K ek;
1906     if (e.hash == h &&
1907     ((ek = e.key) == key ||
1908     (ek != null && key.equals(ek)))) {
1909     val = remappingFunction.apply(key, e.val);
1910     if (val != null)
1911     e.val = val;
1912     else {
1913     delta = -1;
1914     Node<K,V> en = e.next;
1915     if (pred != null)
1916     pred.next = en;
1917     else
1918     setTabAt(tab, i, en);
1919     }
1920     break;
1921     }
1922     pred = e;
1923     if ((e = e.next) == null) {
1924     val = remappingFunction.apply(key, null);
1925     if (val != null) {
1926 dl 1.259 if (pred.next != null)
1927     throw new IllegalStateException("Recursive update");
1928 dl 1.222 delta = 1;
1929 jsr166 1.290 pred.next = new Node<K,V>(h, key, val);
1930 dl 1.222 }
1931     break;
1932     }
1933     }
1934     }
1935     else if (f instanceof TreeBin) {
1936     binCount = 1;
1937     TreeBin<K,V> t = (TreeBin<K,V>)f;
1938     TreeNode<K,V> r, p;
1939     if ((r = t.root) != null)
1940     p = r.findTreeNode(h, key, null);
1941     else
1942     p = null;
1943     V pv = (p == null) ? null : p.val;
1944     val = remappingFunction.apply(key, pv);
1945 dl 1.119 if (val != null) {
1946     if (p != null)
1947     p.val = val;
1948     else {
1949     delta = 1;
1950 dl 1.222 t.putTreeVal(h, key, val);
1951 dl 1.119 }
1952     }
1953     else if (p != null) {
1954     delta = -1;
1955 dl 1.222 if (t.removeTreeNode(p))
1956     setTabAt(tab, i, untreeify(t.first));
1957 dl 1.119 }
1958     }
1959 dl 1.259 else if (f instanceof ReservationNode)
1960     throw new IllegalStateException("Recursive update");
1961 dl 1.119 }
1962     }
1963 dl 1.222 if (binCount != 0) {
1964     if (binCount >= TREEIFY_THRESHOLD)
1965     treeifyBin(tab, i);
1966     break;
1967 dl 1.119 }
1968 dl 1.105 }
1969 dl 1.99 }
1970 dl 1.149 if (delta != 0)
1971 dl 1.222 addCount((long)delta, binCount);
1972 dl 1.151 return val;
1973 dl 1.119 }
1974    
1975 dl 1.222 /**
1976     * If the specified key is not already associated with a
1977     * (non-null) value, associates it with the given value.
1978     * Otherwise, replaces the value with the results of the given
1979     * remapping function, or removes if {@code null}. The entire
1980     * method invocation is performed atomically. Some attempted
1981     * update operations on this map by other threads may be blocked
1982     * while computation is in progress, so the computation should be
1983     * short and simple, and must not attempt to update any other
1984     * mappings of this Map.
1985     *
1986     * @param key key with which the specified value is to be associated
1987     * @param value the value to use if absent
1988     * @param remappingFunction the function to recompute a value if present
1989     * @return the new value associated with the specified key, or null if none
1990     * @throws NullPointerException if the specified key or the
1991     * remappingFunction is null
1992     * @throws RuntimeException or Error if the remappingFunction does so,
1993     * in which case the mapping is unchanged
1994     */
1995     public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
1996     if (key == null || value == null || remappingFunction == null)
1997     throw new NullPointerException();
1998     int h = spread(key.hashCode());
1999     V val = null;
2000     int delta = 0;
2001     int binCount = 0;
2002     for (Node<K,V>[] tab = table;;) {
2003     Node<K,V> f; int n, i, fh;
2004     if (tab == null || (n = tab.length) == 0)
2005     tab = initTable();
2006     else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
2007 jsr166 1.290 if (casTabAt(tab, i, null, new Node<K,V>(h, key, value))) {
2008 dl 1.222 delta = 1;
2009     val = value;
2010 dl 1.119 break;
2011     }
2012 dl 1.222 }
2013     else if ((fh = f.hash) == MOVED)
2014     tab = helpTransfer(tab, f);
2015     else {
2016     synchronized (f) {
2017     if (tabAt(tab, i) == f) {
2018     if (fh >= 0) {
2019     binCount = 1;
2020     for (Node<K,V> e = f, pred = null;; ++binCount) {
2021     K ek;
2022     if (e.hash == h &&
2023     ((ek = e.key) == key ||
2024     (ek != null && key.equals(ek)))) {
2025     val = remappingFunction.apply(e.val, value);
2026     if (val != null)
2027     e.val = val;
2028 dl 1.119 else {
2029 dl 1.222 delta = -1;
2030     Node<K,V> en = e.next;
2031     if (pred != null)
2032     pred.next = en;
2033     else
2034     setTabAt(tab, i, en);
2035 dl 1.119 }
2036 dl 1.222 break;
2037     }
2038     pred = e;
2039     if ((e = e.next) == null) {
2040     delta = 1;
2041     val = value;
2042 jsr166 1.290 pred.next = new Node<K,V>(h, key, val);
2043 dl 1.222 break;
2044 dl 1.119 }
2045     }
2046     }
2047 dl 1.222 else if (f instanceof TreeBin) {
2048     binCount = 2;
2049     TreeBin<K,V> t = (TreeBin<K,V>)f;
2050     TreeNode<K,V> r = t.root;
2051     TreeNode<K,V> p = (r == null) ? null :
2052     r.findTreeNode(h, key, null);
2053     val = (p == null) ? value :
2054     remappingFunction.apply(p.val, value);
2055     if (val != null) {
2056     if (p != null)
2057     p.val = val;
2058     else {
2059     delta = 1;
2060     t.putTreeVal(h, key, val);
2061 dl 1.119 }
2062     }
2063 dl 1.222 else if (p != null) {
2064     delta = -1;
2065     if (t.removeTreeNode(p))
2066     setTabAt(tab, i, untreeify(t.first));
2067 dl 1.164 }
2068 dl 1.99 }
2069 dl 1.259 else if (f instanceof ReservationNode)
2070     throw new IllegalStateException("Recursive update");
2071 dl 1.21 }
2072     }
2073 dl 1.222 if (binCount != 0) {
2074     if (binCount >= TREEIFY_THRESHOLD)
2075     treeifyBin(tab, i);
2076     break;
2077     }
2078 dl 1.45 }
2079     }
2080 dl 1.222 if (delta != 0)
2081     addCount((long)delta, binCount);
2082     return val;
2083 tim 1.1 }
2084 dl 1.19
2085 dl 1.222 // Hashtable legacy methods
2086    
2087 dl 1.149 /**
2088 jsr166 1.275 * Tests if some key maps into the specified value in this table.
2089 jsr166 1.250 *
2090 jsr166 1.275 * <p>Note that this method is identical in functionality to
2091 dl 1.222 * {@link #containsValue(Object)}, and exists solely to ensure
2092     * full compatibility with class {@link java.util.Hashtable},
2093 jsr166 1.281 * which supported this method prior to introduction of the
2094     * Java Collections Framework.
2095 dl 1.222 *
2096     * @param value a value to search for
2097     * @return {@code true} if and only if some key maps to the
2098     * {@code value} argument in this table as
2099     * determined by the {@code equals} method;
2100     * {@code false} otherwise
2101     * @throws NullPointerException if the specified value is null
2102 dl 1.149 */
2103 dl 1.246 public boolean contains(Object value) {
2104 dl 1.222 return containsValue(value);
2105 dl 1.149 }
2106    
2107 tim 1.1 /**
2108 dl 1.222 * Returns an enumeration of the keys in this table.
2109     *
2110     * @return an enumeration of the keys in this table
2111     * @see #keySet()
2112 tim 1.1 */
2113 dl 1.222 public Enumeration<K> keys() {
2114     Node<K,V>[] t;
2115     int f = (t = table) == null ? 0 : t.length;
2116     return new KeyIterator<K,V>(t, f, 0, f, this);
2117 tim 1.1 }
2118    
2119     /**
2120 dl 1.222 * Returns an enumeration of the values in this table.
2121     *
2122     * @return an enumeration of the values in this table
2123     * @see #values()
2124     */
2125     public Enumeration<V> elements() {
2126     Node<K,V>[] t;
2127     int f = (t = table) == null ? 0 : t.length;
2128     return new ValueIterator<K,V>(t, f, 0, f, this);
2129     }
2130    
2131     // ConcurrentHashMap-only methods
2132    
2133     /**
2134     * Returns the number of mappings. This method should be used
2135     * instead of {@link #size} because a ConcurrentHashMap may
2136     * contain more mappings than can be represented as an int. The
2137     * value returned is an estimate; the actual count may differ if
2138     * there are concurrent insertions or removals.
2139     *
2140     * @return the number of mappings
2141     * @since 1.8
2142     */
2143     public long mappingCount() {
2144     long n = sumCount();
2145     return (n < 0L) ? 0L : n; // ignore transient negative values
2146     }
2147    
2148     /**
2149     * Creates a new {@link Set} backed by a ConcurrentHashMap
2150     * from the given type to {@code Boolean.TRUE}.
2151     *
2152 jsr166 1.237 * @param <K> the element type of the returned set
2153 dl 1.222 * @return the new set
2154     * @since 1.8
2155     */
2156     public static <K> KeySetView<K,Boolean> newKeySet() {
2157     return new KeySetView<K,Boolean>
2158     (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
2159     }
2160    
2161     /**
2162     * Creates a new {@link Set} backed by a ConcurrentHashMap
2163     * from the given type to {@code Boolean.TRUE}.
2164     *
2165     * @param initialCapacity The implementation performs internal
2166     * sizing to accommodate this many elements.
2167 jsr166 1.237 * @param <K> the element type of the returned set
2168 jsr166 1.239 * @return the new set
2169 dl 1.222 * @throws IllegalArgumentException if the initial capacity of
2170     * elements is negative
2171     * @since 1.8
2172     */
2173     public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2174     return new KeySetView<K,Boolean>
2175     (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
2176     }
2177    
2178     /**
2179     * Returns a {@link Set} view of the keys in this map, using the
2180     * given common mapped value for any additions (i.e., {@link
2181     * Collection#add} and {@link Collection#addAll(Collection)}).
2182     * This is of course only appropriate if it is acceptable to use
2183     * the same value for all additions from this view.
2184     *
2185     * @param mappedValue the mapped value to use for any additions
2186     * @return the set view
2187     * @throws NullPointerException if the mappedValue is null
2188     */
2189     public KeySetView<K,V> keySet(V mappedValue) {
2190     if (mappedValue == null)
2191     throw new NullPointerException();
2192     return new KeySetView<K,V>(this, mappedValue);
2193     }
2194    
2195     /* ---------------- Special Nodes -------------- */
2196    
2197     /**
2198     * A node inserted at head of bins during transfer operations.
2199     */
2200     static final class ForwardingNode<K,V> extends Node<K,V> {
2201     final Node<K,V>[] nextTable;
2202     ForwardingNode(Node<K,V>[] tab) {
2203 jsr166 1.290 super(MOVED, null, null);
2204 dl 1.222 this.nextTable = tab;
2205     }
2206    
2207     Node<K,V> find(int h, Object k) {
2208 dl 1.234 // loop to avoid arbitrarily deep recursion on forwarding nodes
2209     outer: for (Node<K,V>[] tab = nextTable;;) {
2210     Node<K,V> e; int n;
2211     if (k == null || tab == null || (n = tab.length) == 0 ||
2212     (e = tabAt(tab, (n - 1) & h)) == null)
2213     return null;
2214     for (;;) {
2215 dl 1.222 int eh; K ek;
2216     if ((eh = e.hash) == h &&
2217     ((ek = e.key) == k || (ek != null && k.equals(ek))))
2218     return e;
2219 dl 1.234 if (eh < 0) {
2220     if (e instanceof ForwardingNode) {
2221     tab = ((ForwardingNode<K,V>)e).nextTable;
2222     continue outer;
2223     }
2224     else
2225     return e.find(h, k);
2226     }
2227     if ((e = e.next) == null)
2228     return null;
2229     }
2230 dl 1.222 }
2231     }
2232     }
2233    
2234     /**
2235 jsr166 1.285 * A place-holder node used in computeIfAbsent and compute.
2236 dl 1.222 */
2237     static final class ReservationNode<K,V> extends Node<K,V> {
2238     ReservationNode() {
2239 jsr166 1.290 super(RESERVED, null, null);
2240 dl 1.222 }
2241    
2242     Node<K,V> find(int h, Object k) {
2243     return null;
2244     }
2245     }
2246    
2247     /* ---------------- Table Initialization and Resizing -------------- */
2248    
2249     /**
2250 dl 1.252 * Returns the stamp bits for resizing a table of size n.
2251     * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
2252     */
2253     static final int resizeStamp(int n) {
2254 jsr166 1.254 return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
2255 dl 1.252 }
2256    
2257     /**
2258 dl 1.222 * Initializes table, using the size recorded in sizeCtl.
2259 dl 1.119 */
2260 dl 1.210 private final Node<K,V>[] initTable() {
2261     Node<K,V>[] tab; int sc;
2262 dl 1.222 while ((tab = table) == null || tab.length == 0) {
2263 dl 1.119 if ((sc = sizeCtl) < 0)
2264     Thread.yield(); // lost initialization race; just spin
2265 dl 1.149 else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
2266 dl 1.119 try {
2267 dl 1.222 if ((tab = table) == null || tab.length == 0) {
2268 dl 1.119 int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2269 jsr166 1.245 @SuppressWarnings("unchecked")
2270 dl 1.246 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
2271 dl 1.222 table = tab = nt;
2272 dl 1.119 sc = n - (n >>> 2);
2273     }
2274     } finally {
2275     sizeCtl = sc;
2276     }
2277     break;
2278     }
2279     }
2280     return tab;
2281 dl 1.4 }
2282    
    /**
     * Adds to count, and if table is too small and not already
     * resizing, initiates transfer. If already resizing, helps
     * perform transfer if work is available. Rechecks occupancy
     * after a transfer to see if another resize is already needed
     * because resizings are lagging additions.
     *
     * @param x the count to add
     * @param check if <0, don't check resize, if <= 1 only check if uncontended
     */
    private final void addCount(long x, int check) {
        CounterCell[] as; long b, s;
        // Fast path: no counter cells yet and the CAS on baseCount wins.
        if ((as = counterCells) != null ||
            !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
            CounterCell a; long v; int m;
            boolean uncontended = true;
            // Try the thread's probe-indexed cell; on any miss or CAS
            // failure, defer to the full LongAdder-style slow path.
            if (as == null || (m = as.length - 1) < 0 ||
                (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
                !(uncontended =
                  U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
                fullAddCount(x, uncontended);
                return;
            }
            if (check <= 1)
                return;
            s = sumCount();
        }
        if (check >= 0) {
            Node<K,V>[] tab, nt; int n, sc;
            // While the element count has reached the sizeCtl threshold,
            // start or join a resize.
            while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
                   (n = tab.length) < MAXIMUM_CAPACITY) {
                int rs = resizeStamp(n);
                if (sc < 0) {
                    // A resize is in progress; bail out if it is for a
                    // different table size (stamp mismatch), finished,
                    // saturated with helpers, or has no work left.
                    if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                        sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                        transferIndex <= 0)
                        break;
                    // Register as one more helper thread.
                    if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
                        transfer(tab, nt);
                }
                // Not resizing yet: install stamp (+2 encodes one active
                // resizer) and initiate the transfer.
                else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                             (rs << RESIZE_STAMP_SHIFT) + 2))
                    transfer(tab, null);
                s = sumCount();
            }
        }
    }
2330    
    /**
     * Helps transfer if a resize is in progress.
     * Called when an operation encounters a ForwardingNode; joins the
     * ongoing transfer (bumping the helper count in sizeCtl) and
     * returns the table the caller should retry against.
     */
    final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
        Node<K,V>[] nextTab; int sc;
        if (tab != null && (f instanceof ForwardingNode) &&
            (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
            int rs = resizeStamp(tab.length);
            // Only help while this exact resize generation is still active.
            while (nextTab == nextTable && table == tab &&
                   (sc = sizeCtl) < 0) {
                // Stop if the stamp no longer matches, the resize is
                // finishing, helpers are saturated, or no work remains.
                if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                    sc == rs + MAX_RESIZERS || transferIndex <= 0)
                    break;
                if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
                    transfer(tab, nextTab);
                    break;
                }
            }
            return nextTab;
        }
        return table;
    }
2353    
    /**
     * Tries to presize table to accommodate the given number of elements.
     *
     * @param size number of elements (doesn't need to be perfectly accurate)
     */
    private final void tryPresize(int size) {
        // Target capacity: 1.5*size + 1 rounded up to a power of two,
        // capped at MAXIMUM_CAPACITY.
        int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
            tableSizeFor(size + (size >>> 1) + 1);
        int sc;
        while ((sc = sizeCtl) >= 0) {  // loop until resizing or big enough
            Node<K,V>[] tab = table; int n;
            if (tab == null || (n = tab.length) == 0) {
                // Table not yet created: initialize it here, same
                // protocol as initTable (CAS sizeCtl to -1).
                n = (sc > c) ? sc : c;
                if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                    try {
                        if (table == tab) {
                            @SuppressWarnings("unchecked")
                            Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                            table = nt;
                            sc = n - (n >>> 2); // 0.75 * n threshold
                        }
                    } finally {
                        sizeCtl = sc;
                    }
                }
            }
            else if (c <= sc || n >= MAXIMUM_CAPACITY)
                break; // already large enough, or cannot grow further
            else if (tab == table) {
                // Initiate a resize of the current table; stamp encodes
                // the old size, +2 encodes one active resizer.
                int rs = resizeStamp(n);
                if (U.compareAndSwapInt(this, SIZECTL, sc,
                                        (rs << RESIZE_STAMP_SHIFT) + 2))
                    transfer(tab, null);
            }
        }
    }
2390    
    /**
     * Moves and/or copies the nodes in each bin to new table. See
     * above for explanation.
     *
     * Work is claimed in strides of indices (via CAS on transferIndex,
     * descending from the old length). Each processed bin in the old
     * table is replaced by a ForwardingNode so concurrent readers
     * follow it into nextTab. The last exiting resizer re-sweeps the
     * table before committing nextTab.
     */
    private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
        int n = tab.length, stride;
        if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
            stride = MIN_TRANSFER_STRIDE; // subdivide range
        if (nextTab == null) {            // initiating
            try {
                @SuppressWarnings("unchecked")
                Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
                nextTab = nt;
            } catch (Throwable ex) {      // try to cope with OOME
                // Saturate sizeCtl so no further resize is attempted.
                sizeCtl = Integer.MAX_VALUE;
                return;
            }
            nextTable = nextTab;
            transferIndex = n;
        }
        int nextn = nextTab.length;
        ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
        boolean advance = true;
        boolean finishing = false; // to ensure sweep before committing nextTab
        for (int i = 0, bound = 0;;) {
            Node<K,V> f; int fh;
            // Claim the next bin: step within the current stride, or
            // CAS transferIndex to reserve a fresh stride of bins.
            while (advance) {
                int nextIndex, nextBound;
                if (--i >= bound || finishing)
                    advance = false;
                else if ((nextIndex = transferIndex) <= 0) {
                    i = -1; // no work left to claim
                    advance = false;
                }
                else if (U.compareAndSwapInt
                         (this, TRANSFERINDEX, nextIndex,
                          nextBound = (nextIndex > stride ?
                                       nextIndex - stride : 0))) {
                    bound = nextBound;
                    i = nextIndex - 1;
                    advance = false;
                }
            }
            if (i < 0 || i >= n || i + n >= nextn) {
                int sc;
                if (finishing) {
                    // Commit: publish the new table and set the next
                    // threshold to 0.75 * (2n).
                    nextTable = null;
                    table = nextTab;
                    sizeCtl = (n << 1) - (n >>> 1);
                    return;
                }
                // Deregister this resizer; the last one out (sc - 2 equals
                // the original stamp value) performs the finishing sweep.
                if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
                    if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
                        return;
                    finishing = advance = true;
                    i = n; // recheck before commit
                }
            }
            else if ((f = tabAt(tab, i)) == null)
                advance = casTabAt(tab, i, null, fwd);
            else if ((fh = f.hash) == MOVED)
                advance = true; // already processed
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        // Split bin i into "low" (index i) and "high"
                        // (index i + n) lists by the bit (hash & n).
                        Node<K,V> ln, hn;
                        if (fh >= 0) { // plain linked-list bin
                            // lastRun..end all share the same split bit,
                            // so that suffix can be reused without copying.
                            int runBit = fh & n;
                            Node<K,V> lastRun = f;
                            for (Node<K,V> p = f.next; p != null; p = p.next) {
                                int b = p.hash & n;
                                if (b != runBit) {
                                    runBit = b;
                                    lastRun = p;
                                }
                            }
                            if (runBit == 0) {
                                ln = lastRun;
                                hn = null;
                            }
                            else {
                                hn = lastRun;
                                ln = null;
                            }
                            // Copy the remaining prefix, prepending onto
                            // whichever list each node belongs to.
                            for (Node<K,V> p = f; p != lastRun; p = p.next) {
                                int ph = p.hash; K pk = p.key; V pv = p.val;
                                if ((ph & n) == 0)
                                    ln = new Node<K,V>(ph, pk, pv, ln);
                                else
                                    hn = new Node<K,V>(ph, pk, pv, hn);
                            }
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);
                            advance = true;
                        }
                        else if (f instanceof TreeBin) {
                            // Split the tree's node list into low/high
                            // doubly-linked lists, counting each side.
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> lo = null, loTail = null;
                            TreeNode<K,V> hi = null, hiTail = null;
                            int lc = 0, hc = 0;
                            for (Node<K,V> e = t.first; e != null; e = e.next) {
                                int h = e.hash;
                                TreeNode<K,V> p = new TreeNode<K,V>
                                    (h, e.key, e.val, null, null);
                                if ((h & n) == 0) {
                                    if ((p.prev = loTail) == null)
                                        lo = p;
                                    else
                                        loTail.next = p;
                                    loTail = p;
                                    ++lc;
                                }
                                else {
                                    if ((p.prev = hiTail) == null)
                                        hi = p;
                                    else
                                        hiTail.next = p;
                                    hiTail = p;
                                    ++hc;
                                }
                            }
                            // Untreeify small halves; reuse the original
                            // bin when one side got everything.
                            ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                                (hc != 0) ? new TreeBin<K,V>(lo) : t;
                            hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                                (lc != 0) ? new TreeBin<K,V>(hi) : t;
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);
                            advance = true;
                        }
                    }
                }
            }
        }
    }
2527 tim 1.1
2528 dl 1.149 /* ---------------- Counter support -------------- */
2529    
    /**
     * A padded cell for distributing counts. Adapted from LongAdder
     * and Striped64. See their internal docs for explanation.
     * {@code @Contended} pads the cell to avoid false sharing between
     * cells updated by different threads.
     */
    @jdk.internal.vm.annotation.Contended static final class CounterCell {
        volatile long value;
        CounterCell(long x) { value = x; }
    }
2538    
2539 dl 1.149 final long sumCount() {
2540 dl 1.222 CounterCell[] as = counterCells; CounterCell a;
2541 dl 1.149 long sum = baseCount;
2542     if (as != null) {
2543     for (int i = 0; i < as.length; ++i) {
2544     if ((a = as[i]) != null)
2545     sum += a.value;
2546 dl 1.119 }
2547     }
2548 dl 1.149 return sum;
2549 dl 1.119 }
2550    
    // See LongAdder version for explanation.
    // Slow path for addCount: retries under contention, creating,
    // expanding, or initializing the counterCells table as needed,
    // guarded by the cellsBusy spinlock (0 = free, 1 = held).
    private final void fullAddCount(long x, boolean wasUncontended) {
        int h;
        if ((h = ThreadLocalRandom.getProbe()) == 0) {
            ThreadLocalRandom.localInit();      // force initialization
            h = ThreadLocalRandom.getProbe();
            wasUncontended = true;
        }
        boolean collide = false;                // True if last slot nonempty
        for (;;) {
            CounterCell[] as; CounterCell a; int n; long v;
            if ((as = counterCells) != null && (n = as.length) > 0) {
                if ((a = as[(n - 1) & h]) == null) {
                    if (cellsBusy == 0) {            // Try to attach new Cell
                        CounterCell r = new CounterCell(x); // Optimistic create
                        if (cellsBusy == 0 &&
                            U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                            boolean created = false;
                            try {               // Recheck under lock
                                CounterCell[] rs; int m, j;
                                if ((rs = counterCells) != null &&
                                    (m = rs.length) > 0 &&
                                    rs[j = (m - 1) & h] == null) {
                                    rs[j] = r;
                                    created = true;
                                }
                            } finally {
                                cellsBusy = 0;
                            }
                            if (created)
                                break;
                            continue;           // Slot is now non-empty
                        }
                    }
                    collide = false;
                }
                else if (!wasUncontended)       // CAS already known to fail
                    wasUncontended = true;      // Continue after rehash
                else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
                    break;
                else if (counterCells != as || n >= NCPU)
                    collide = false;            // At max size or stale
                else if (!collide)
                    collide = true;             // allow one more CAS attempt
                else if (cellsBusy == 0 &&
                         U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                    // Repeated collisions: double the cell table.
                    try {
                        if (counterCells == as) {// Expand table unless stale
                            CounterCell[] rs = new CounterCell[n << 1];
                            for (int i = 0; i < n; ++i)
                                rs[i] = as[i];
                            counterCells = rs;
                        }
                    } finally {
                        cellsBusy = 0;
                    }
                    collide = false;
                    continue;                   // Retry with expanded table
                }
                // Rehash this thread onto a different cell.
                h = ThreadLocalRandom.advanceProbe(h);
            }
            else if (cellsBusy == 0 && counterCells == as &&
                     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                boolean init = false;
                try {                           // Initialize table
                    if (counterCells == as) {
                        CounterCell[] rs = new CounterCell[2];
                        rs[h & 1] = new CounterCell(x);
                        counterCells = rs;
                        init = true;
                    }
                } finally {
                    cellsBusy = 0;
                }
                if (init)
                    break;
            }
            else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
                break;                          // Fall back on using base
        }
    }
2632    
2633 dl 1.222 /* ---------------- Conversion from/to TreeBins -------------- */
2634 dl 1.119
    /**
     * Replaces all linked nodes in bin at given index unless table is
     * too small, in which case resizes instead.
     */
    private final void treeifyBin(Node<K,V>[] tab, int index) {
        Node<K,V> b; int n;
        if (tab != null) {
            // Below MIN_TREEIFY_CAPACITY, prefer growing the table over
            // converting the bin to a tree.
            if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
                tryPresize(n << 1);
            // b.hash >= 0 means a plain list head (not a forwarding or
            // other special node).
            else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
                synchronized (b) {
                    if (tabAt(tab, index) == b) { // recheck under bin lock
                        // Copy the list into a doubly-linked TreeNode
                        // chain preserving order, then install a TreeBin.
                        TreeNode<K,V> hd = null, tl = null;
                        for (Node<K,V> e = b; e != null; e = e.next) {
                            TreeNode<K,V> p =
                                new TreeNode<K,V>(e.hash, e.key, e.val,
                                                  null, null);
                            if ((p.prev = tl) == null)
                                hd = p;
                            else
                                tl.next = p;
                            tl = p;
                        }
                        setTabAt(tab, index, new TreeBin<K,V>(hd));
                    }
                }
            }
        }
    }
2664    
2665     /**
2666 jsr166 1.289 * Returns a list of non-TreeNodes replacing those in given list.
2667 dl 1.210 */
2668 dl 1.222 static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2669     Node<K,V> hd = null, tl = null;
2670     for (Node<K,V> q = b; q != null; q = q.next) {
2671 jsr166 1.290 Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val);
2672 dl 1.222 if (tl == null)
2673     hd = p;
2674     else
2675     tl.next = p;
2676     tl = p;
2677 dl 1.210 }
2678 dl 1.222 return hd;
2679     }
2680 dl 1.210
2681 dl 1.222 /* ---------------- TreeNodes -------------- */
2682    
    /**
     * Nodes for use in TreeBins. Extends Node so bins can hold either
     * list or tree nodes; carries red-black tree links plus a prev
     * pointer so the traversal list can be unlinked on deletion.
     */
    static final class TreeNode<K,V> extends Node<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;

        TreeNode(int hash, K key, V val, Node<K,V> next,
                 TreeNode<K,V> parent) {
            super(hash, key, val, next);
            this.parent = parent;
        }

        Node<K,V> find(int h, Object k) {
            return findTreeNode(h, k, null);
        }

        /**
         * Returns the TreeNode (or null if not found) for the given key
         * starting at given root.
         * Descends by hash; on hash ties, tries equals, then a
         * Comparable ordering if the key supports one, and otherwise
         * searches one subtree recursively before descending the other.
         */
        final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
            if (k != null) {
                TreeNode<K,V> p = this;
                do {
                    int ph, dir; K pk; TreeNode<K,V> q;
                    TreeNode<K,V> pl = p.left, pr = p.right;
                    if ((ph = p.hash) > h)
                        p = pl;
                    else if (ph < h)
                        p = pr;
                    else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                        return p;
                    else if (pl == null)
                        p = pr;
                    else if (pr == null)
                        p = pl;
                    else if ((kc != null ||
                              (kc = comparableClassFor(k)) != null) &&
                             (dir = compareComparables(kc, k, pk)) != 0)
                        p = (dir < 0) ? pl : pr;
                    // No usable ordering: search right subtree fully,
                    // then continue down the left.
                    else if ((q = pr.findTreeNode(h, k, kc)) != null)
                        return q;
                    else
                        p = pl;
                } while (p != null);
            }
            return null;
        }
    }
2736 dl 1.192
2737 dl 1.222 /* ---------------- TreeBins -------------- */
2738 dl 1.119
2739 dl 1.222 /**
2740     * TreeNodes used at the heads of bins. TreeBins do not hold user
2741     * keys or values, but instead point to list of TreeNodes and
2742     * their root. They also maintain a parasitic read-write lock
2743     * forcing writers (who hold bin lock) to wait for readers (who do
2744     * not) to complete before tree restructuring operations.
2745     */
2746     static final class TreeBin<K,V> extends Node<K,V> {
2747     TreeNode<K,V> root;
2748     volatile TreeNode<K,V> first;
2749     volatile Thread waiter;
2750     volatile int lockState;
2751 dl 1.224 // values for lockState
2752     static final int WRITER = 1; // set while holding write lock
2753     static final int WAITER = 2; // set when waiting for write lock
2754     static final int READER = 4; // increment value for setting read lock
2755 dl 1.119
2756 dl 1.222 /**
2757 dl 1.240 * Tie-breaking utility for ordering insertions when equal
2758     * hashCodes and non-comparable. We don't require a total
2759     * order, just a consistent insertion rule to maintain
2760     * equivalence across rebalancings. Tie-breaking further than
2761     * necessary simplifies testing a bit.
2762     */
2763     static int tieBreakOrder(Object a, Object b) {
2764     int d;
2765     if (a == null || b == null ||
2766     (d = a.getClass().getName().
2767     compareTo(b.getClass().getName())) == 0)
2768     d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
2769     -1 : 1);
2770     return d;
2771     }
2772    
2773     /**
2774 dl 1.222 * Creates bin with initial set of nodes headed by b.
2775     */
2776     TreeBin(TreeNode<K,V> b) {
2777 jsr166 1.290 super(TREEBIN, null, null);
2778 dl 1.224 this.first = b;
2779 dl 1.222 TreeNode<K,V> r = null;
2780     for (TreeNode<K,V> x = b, next; x != null; x = next) {
2781     next = (TreeNode<K,V>)x.next;
2782     x.left = x.right = null;
2783     if (r == null) {
2784     x.parent = null;
2785     x.red = false;
2786     r = x;
2787     }
2788     else {
2789 dl 1.240 K k = x.key;
2790     int h = x.hash;
2791 dl 1.222 Class<?> kc = null;
2792     for (TreeNode<K,V> p = r;;) {
2793     int dir, ph;
2794 dl 1.240 K pk = p.key;
2795     if ((ph = p.hash) > h)
2796 dl 1.222 dir = -1;
2797 dl 1.240 else if (ph < h)
2798 dl 1.222 dir = 1;
2799 dl 1.240 else if ((kc == null &&
2800     (kc = comparableClassFor(k)) == null) ||
2801     (dir = compareComparables(kc, k, pk)) == 0)
2802     dir = tieBreakOrder(k, pk);
2803 jsr166 1.260 TreeNode<K,V> xp = p;
2804 dl 1.222 if ((p = (dir <= 0) ? p.left : p.right) == null) {
2805     x.parent = xp;
2806     if (dir <= 0)
2807     xp.left = x;
2808     else
2809     xp.right = x;
2810     r = balanceInsertion(r, x);
2811     break;
2812     }
2813     }
2814     }
2815     }
2816 dl 1.224 this.root = r;
2817 dl 1.240 assert checkInvariants(root);
2818 dl 1.222 }
2819 dl 1.210
2820 dl 1.222 /**
2821 jsr166 1.229 * Acquires write lock for tree restructuring.
2822 dl 1.222 */
2823     private final void lockRoot() {
2824     if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
2825     contendedLock(); // offload to separate method
2826 dl 1.153 }
2827    
2828 dl 1.222 /**
2829 jsr166 1.229 * Releases write lock for tree restructuring.
2830 dl 1.222 */
2831     private final void unlockRoot() {
2832     lockState = 0;
2833 dl 1.191 }
2834    
2835 dl 1.222 /**
2836 jsr166 1.229 * Possibly blocks awaiting root lock.
2837 dl 1.222 */
2838     private final void contendedLock() {
2839     boolean waiting = false;
2840     for (int s;;) {
2841 dl 1.252 if (((s = lockState) & ~WAITER) == 0) {
2842 dl 1.222 if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
2843     if (waiting)
2844     waiter = null;
2845     return;
2846     }
2847     }
2848 dl 1.244 else if ((s & WAITER) == 0) {
2849 dl 1.222 if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
2850     waiting = true;
2851     waiter = Thread.currentThread();
2852     }
2853     }
2854     else if (waiting)
2855     LockSupport.park(this);
2856     }
2857 dl 1.192 }
2858    
2859 dl 1.222 /**
2860     * Returns matching node or null if none. Tries to search
2861 jsr166 1.232 * using tree comparisons from root, but continues linear
2862 dl 1.222 * search when lock not available.
2863     */
2864     final Node<K,V> find(int h, Object k) {
2865     if (k != null) {
2866 dl 1.253 for (Node<K,V> e = first; e != null; ) {
2867 dl 1.222 int s; K ek;
2868     if (((s = lockState) & (WAITER|WRITER)) != 0) {
2869     if (e.hash == h &&
2870     ((ek = e.key) == k || (ek != null && k.equals(ek))))
2871     return e;
2872 dl 1.253 e = e.next;
2873 dl 1.222 }
2874     else if (U.compareAndSwapInt(this, LOCKSTATE, s,
2875     s + READER)) {
2876     TreeNode<K,V> r, p;
2877     try {
2878     p = ((r = root) == null ? null :
2879     r.findTreeNode(h, k, null));
2880     } finally {
2881     Thread w;
2882     if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
2883     (READER|WAITER) && (w = waiter) != null)
2884     LockSupport.unpark(w);
2885     }
2886     return p;
2887     }
2888     }
2889     }
2890     return null;
2891 dl 1.192 }
2892    
2893 dl 1.222 /**
2894     * Finds or adds a node.
2895     * @return null if added
2896     */
2897     final TreeNode<K,V> putTreeVal(int h, K k, V v) {
2898     Class<?> kc = null;
2899 dl 1.240 boolean searched = false;
2900 dl 1.224 for (TreeNode<K,V> p = root;;) {
2901 dl 1.240 int dir, ph; K pk;
2902 dl 1.224 if (p == null) {
2903     first = root = new TreeNode<K,V>(h, k, v, null, null);
2904     break;
2905     }
2906     else if ((ph = p.hash) > h)
2907 dl 1.222 dir = -1;
2908     else if (ph < h)
2909     dir = 1;
2910     else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2911     return p;
2912     else if ((kc == null &&
2913     (kc = comparableClassFor(k)) == null) ||
2914     (dir = compareComparables(kc, k, pk)) == 0) {
2915 dl 1.240 if (!searched) {
2916     TreeNode<K,V> q, ch;
2917     searched = true;
2918     if (((ch = p.left) != null &&
2919     (q = ch.findTreeNode(h, k, kc)) != null) ||
2920     ((ch = p.right) != null &&
2921     (q = ch.findTreeNode(h, k, kc)) != null))
2922     return q;
2923     }
2924     dir = tieBreakOrder(k, pk);
2925 dl 1.222 }
2926 dl 1.240
2927 dl 1.222 TreeNode<K,V> xp = p;
2928 dl 1.240 if ((p = (dir <= 0) ? p.left : p.right) == null) {
2929 dl 1.222 TreeNode<K,V> x, f = first;
2930     first = x = new TreeNode<K,V>(h, k, v, f, xp);
2931     if (f != null)
2932     f.prev = x;
2933 dl 1.240 if (dir <= 0)
2934 dl 1.222 xp.left = x;
2935     else
2936     xp.right = x;
2937     if (!xp.red)
2938     x.red = true;
2939     else {
2940     lockRoot();
2941     try {
2942     root = balanceInsertion(root, x);
2943     } finally {
2944     unlockRoot();
2945     }
2946     }
2947 dl 1.224 break;
2948 dl 1.222 }
2949     }
2950 dl 1.224 assert checkInvariants(root);
2951     return null;
2952 dl 1.192 }
2953    
2954 dl 1.222 /**
2955     * Removes the given node, that must be present before this
2956     * call. This is messier than typical red-black deletion code
2957     * because we cannot swap the contents of an interior node
2958     * with a leaf successor that is pinned by "next" pointers
2959     * that are accessible independently of lock. So instead we
2960     * swap the tree linkages.
2961     *
2962 jsr166 1.230 * @return true if now too small, so should be untreeified
2963 dl 1.222 */
2964     final boolean removeTreeNode(TreeNode<K,V> p) {
2965     TreeNode<K,V> next = (TreeNode<K,V>)p.next;
2966     TreeNode<K,V> pred = p.prev; // unlink traversal pointers
2967     TreeNode<K,V> r, rl;
2968     if (pred == null)
2969     first = next;
2970     else
2971     pred.next = next;
2972     if (next != null)
2973     next.prev = pred;
2974     if (first == null) {
2975     root = null;
2976     return true;
2977     }
2978 dl 1.224 if ((r = root) == null || r.right == null || // too small
2979 dl 1.222 (rl = r.left) == null || rl.left == null)
2980     return true;
2981     lockRoot();
2982     try {
2983     TreeNode<K,V> replacement;
2984     TreeNode<K,V> pl = p.left;
2985     TreeNode<K,V> pr = p.right;
2986     if (pl != null && pr != null) {
2987     TreeNode<K,V> s = pr, sl;
2988     while ((sl = s.left) != null) // find successor
2989     s = sl;
2990     boolean c = s.red; s.red = p.red; p.red = c; // swap colors
2991     TreeNode<K,V> sr = s.right;
2992     TreeNode<K,V> pp = p.parent;
2993     if (s == pr) { // p was s's direct parent
2994     p.parent = s;
2995     s.right = p;
2996     }
2997     else {
2998     TreeNode<K,V> sp = s.parent;
2999     if ((p.parent = sp) != null) {
3000     if (s == sp.left)
3001     sp.left = p;
3002     else
3003     sp.right = p;
3004     }
3005     if ((s.right = pr) != null)
3006     pr.parent = s;
3007     }
3008     p.left = null;
3009     if ((p.right = sr) != null)
3010     sr.parent = p;
3011     if ((s.left = pl) != null)
3012     pl.parent = s;
3013     if ((s.parent = pp) == null)
3014     r = s;
3015     else if (p == pp.left)
3016     pp.left = s;
3017     else
3018     pp.right = s;
3019     if (sr != null)
3020     replacement = sr;
3021     else
3022     replacement = p;
3023     }
3024     else if (pl != null)
3025     replacement = pl;
3026     else if (pr != null)
3027     replacement = pr;
3028     else
3029     replacement = p;
3030     if (replacement != p) {
3031     TreeNode<K,V> pp = replacement.parent = p.parent;
3032     if (pp == null)
3033     r = replacement;
3034     else if (p == pp.left)
3035     pp.left = replacement;
3036     else
3037     pp.right = replacement;
3038     p.left = p.right = p.parent = null;
3039     }
3040    
3041     root = (p.red) ? r : balanceDeletion(r, replacement);
3042    
3043     if (p == replacement) { // detach pointers
3044     TreeNode<K,V> pp;
3045     if ((pp = p.parent) != null) {
3046     if (p == pp.left)
3047     pp.left = null;
3048     else if (p == pp.right)
3049     pp.right = null;
3050     p.parent = null;
3051     }
3052     }
3053     } finally {
3054     unlockRoot();
3055     }
3056 dl 1.224 assert checkInvariants(root);
3057 dl 1.222 return false;
3058 dl 1.210 }
3059    
3060 dl 1.222 /* ------------------------------------------------------------ */
3061     // Red-black tree methods, all adapted from CLR
3062 dl 1.210
3063 dl 1.222 static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
3064     TreeNode<K,V> p) {
3065 dl 1.224 TreeNode<K,V> r, pp, rl;
3066     if (p != null && (r = p.right) != null) {
3067 dl 1.222 if ((rl = p.right = r.left) != null)
3068     rl.parent = p;
3069     if ((pp = r.parent = p.parent) == null)
3070     (root = r).red = false;
3071     else if (pp.left == p)
3072     pp.left = r;
3073     else
3074     pp.right = r;
3075     r.left = p;
3076     p.parent = r;
3077     }
3078     return root;
3079 dl 1.119 }
3080    
3081 dl 1.222 static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
3082     TreeNode<K,V> p) {
3083 dl 1.224 TreeNode<K,V> l, pp, lr;
3084     if (p != null && (l = p.left) != null) {
3085 dl 1.222 if ((lr = p.left = l.right) != null)
3086     lr.parent = p;
3087     if ((pp = l.parent = p.parent) == null)
3088     (root = l).red = false;
3089     else if (pp.right == p)
3090     pp.right = l;
3091     else
3092     pp.left = l;
3093     l.right = p;
3094     p.parent = l;
3095     }
3096     return root;
3097 dl 1.119 }
3098    
3099 dl 1.222 static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
3100     TreeNode<K,V> x) {
3101     x.red = true;
3102     for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
3103     if ((xp = x.parent) == null) {
3104     x.red = false;
3105     return x;
3106     }
3107     else if (!xp.red || (xpp = xp.parent) == null)
3108     return root;
3109     if (xp == (xppl = xpp.left)) {
3110     if ((xppr = xpp.right) != null && xppr.red) {
3111     xppr.red = false;
3112     xp.red = false;
3113     xpp.red = true;
3114     x = xpp;
3115     }
3116     else {
3117     if (x == xp.right) {
3118     root = rotateLeft(root, x = xp);
3119     xpp = (xp = x.parent) == null ? null : xp.parent;
3120     }
3121     if (xp != null) {
3122     xp.red = false;
3123     if (xpp != null) {
3124     xpp.red = true;
3125     root = rotateRight(root, xpp);
3126     }
3127     }
3128     }
3129     }
3130     else {
3131     if (xppl != null && xppl.red) {
3132     xppl.red = false;
3133     xp.red = false;
3134     xpp.red = true;
3135     x = xpp;
3136     }
3137     else {
3138     if (x == xp.left) {
3139     root = rotateRight(root, x = xp);
3140     xpp = (xp = x.parent) == null ? null : xp.parent;
3141     }
3142     if (xp != null) {
3143     xp.red = false;
3144     if (xpp != null) {
3145     xpp.red = true;
3146     root = rotateLeft(root, xpp);
3147     }
3148     }
3149     }
3150     }
3151     }
3152 dl 1.119 }
3153    
3154 dl 1.222 static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
3155     TreeNode<K,V> x) {
3156 jsr166 1.256 for (TreeNode<K,V> xp, xpl, xpr;;) {
3157 dl 1.222 if (x == null || x == root)
3158     return root;
3159     else if ((xp = x.parent) == null) {
3160     x.red = false;
3161     return x;
3162     }
3163     else if (x.red) {
3164     x.red = false;
3165     return root;
3166     }
3167     else if ((xpl = xp.left) == x) {
3168     if ((xpr = xp.right) != null && xpr.red) {
3169     xpr.red = false;
3170     xp.red = true;
3171     root = rotateLeft(root, xp);
3172     xpr = (xp = x.parent) == null ? null : xp.right;
3173     }
3174     if (xpr == null)
3175     x = xp;
3176     else {
3177     TreeNode<K,V> sl = xpr.left, sr = xpr.right;
3178     if ((sr == null || !sr.red) &&
3179     (sl == null || !sl.red)) {
3180     xpr.red = true;
3181     x = xp;
3182     }
3183     else {
3184     if (sr == null || !sr.red) {
3185     if (sl != null)
3186     sl.red = false;
3187     xpr.red = true;
3188     root = rotateRight(root, xpr);
3189     xpr = (xp = x.parent) == null ?
3190     null : xp.right;
3191     }
3192     if (xpr != null) {
3193     xpr.red = (xp == null) ? false : xp.red;
3194     if ((sr = xpr.right) != null)
3195     sr.red = false;
3196     }
3197     if (xp != null) {
3198     xp.red = false;
3199     root = rotateLeft(root, xp);
3200     }
3201     x = root;
3202     }
3203     }
3204     }
3205     else { // symmetric
3206     if (xpl != null && xpl.red) {
3207     xpl.red = false;
3208     xp.red = true;
3209     root = rotateRight(root, xp);
3210     xpl = (xp = x.parent) == null ? null : xp.left;
3211     }
3212     if (xpl == null)
3213     x = xp;
3214     else {
3215     TreeNode<K,V> sl = xpl.left, sr = xpl.right;
3216     if ((sl == null || !sl.red) &&
3217     (sr == null || !sr.red)) {
3218     xpl.red = true;
3219     x = xp;
3220     }
3221     else {
3222     if (sl == null || !sl.red) {
3223     if (sr != null)
3224     sr.red = false;
3225     xpl.red = true;
3226     root = rotateLeft(root, xpl);
3227     xpl = (xp = x.parent) == null ?
3228     null : xp.left;
3229     }
3230     if (xpl != null) {
3231     xpl.red = (xp == null) ? false : xp.red;
3232     if ((sl = xpl.left) != null)
3233     sl.red = false;
3234     }
3235     if (xp != null) {
3236     xp.red = false;
3237     root = rotateRight(root, xp);
3238     }
3239     x = root;
3240     }
3241     }
3242     }
3243     }
3244 dl 1.210 }
3245 jsr166 1.225
3246 dl 1.222 /**
3247 jsr166 1.285 * Checks invariants recursively for the tree of Nodes rooted at t.
3248 dl 1.222 */
3249 dl 1.224 static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
3250 dl 1.222 TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
3251     tb = t.prev, tn = (TreeNode<K,V>)t.next;
3252     if (tb != null && tb.next != t)
3253     return false;
3254     if (tn != null && tn.prev != t)
3255     return false;
3256     if (tp != null && t != tp.left && t != tp.right)
3257     return false;
3258     if (tl != null && (tl.parent != t || tl.hash > t.hash))
3259     return false;
3260     if (tr != null && (tr.parent != t || tr.hash < t.hash))
3261     return false;
3262     if (t.red && tl != null && tl.red && tr != null && tr.red)
3263     return false;
3264 dl 1.224 if (tl != null && !checkInvariants(tl))
3265 dl 1.222 return false;
3266 dl 1.224 if (tr != null && !checkInvariants(tr))
3267 dl 1.210 return false;
3268     return true;
3269     }
3270 dl 1.146
        // Unsafe mechanics: LOCKSTATE is the raw field offset of
        // TreeBin.lockState, used by the CAS-based read/write lock
        // operations on tree bins.
        private static final Unsafe U = Unsafe.getUnsafe();
        private static final long LOCKSTATE;
        static {
            try {
                LOCKSTATE = U.objectFieldOffset
                    (TreeBin.class.getDeclaredField("lockState"));
            } catch (ReflectiveOperationException e) {
                // Reflecting on our own declared field cannot fail
                // unless the VM itself is broken; treat as fatal.
                throw new Error(e);
            }
        }
3281 dl 1.119 }
3282    
3283 dl 1.222 /* ----------------Table Traversal -------------- */
3284    
    /**
     * Records the table, its length, and current traversal index for a
     * traverser that must process a region of a forwarded table before
     * proceeding with current table.
     */
    static final class TableStack<K,V> {
        int length;              // length of the saved table
        int index;               // bin index to resume at in that table
        Node<K,V>[] tab;         // the saved (pre-resize) table
        TableStack<K,V> next;    // link to next deferred region (LIFO stack)
    }
3296    
    /**
     * Encapsulates traversal for methods such as containsValue; also
     * serves as a base class for other iterators and spliterators.
     *
     * Method advance visits once each still-valid node that was
     * reachable upon iterator construction. It might miss some that
     * were added to a bin after the bin was visited, which is OK wrt
     * consistency guarantees. Maintaining this property in the face
     * of possible ongoing resizes requires a fair amount of
     * bookkeeping state that is difficult to optimize away amidst
     * volatile accesses.  Even so, traversal maintains reasonable
     * throughput.
     *
     * Normally, iteration proceeds bin-by-bin traversing lists.
     * However, if the table has been resized, then all future steps
     * must traverse both the bin at the current index as well as at
     * (index + baseSize); and so on for further resizings. To
     * paranoically cope with potential sharing by users of iterators
     * across threads, iteration terminates if a bounds checks fails
     * for a table read.
     */
    static class Traverser<K,V> {
        Node<K,V>[] tab;        // current table; updated if resized
        Node<K,V> next;         // the next entry to use
        TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
        int index;              // index of bin to use next
        int baseIndex;          // current index of initial table
        int baseLimit;          // index bound for initial table
        final int baseSize;     // initial table size

        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
            this.tab = tab;
            this.baseSize = size;
            this.baseIndex = this.index = index;
            this.baseLimit = limit;
            this.next = null;
        }

        /**
         * Advances if possible, returning next valid node, or null if none.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;             // continue along current bin's list
            for (;;) {
                Node<K,V>[] t; int i, n;  // must use locals in checks
                if (e != null)
                    return next = e;
                // Terminate past the initial-table bound, or on any
                // out-of-bounds read (possible under unsafe sharing).
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                if ((e = tabAt(t, i)) != null && e.hash < 0) { // special node
                    if (e instanceof ForwardingNode) {
                        // Bin moved during resize: descend into the next
                        // table, remembering where to resume in this one.
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        pushState(t, i, n);
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first; // walk list form of tree
                    else
                        e = null;       // other special bin: nothing to visit
                }
                if (stack != null)
                    recoverState(n);    // resume a deferred forwarded region
                else if ((index = i + baseSize) >= n)
                    index = ++baseIndex; // visit upper slots if present
            }
        }

        /**
         * Saves traversal state upon encountering a forwarding node.
         */
        private void pushState(Node<K,V>[] t, int i, int n) {
            TableStack<K,V> s = spare;  // reuse if possible
            if (s != null)
                spare = s.next;
            else
                s = new TableStack<K,V>();
            s.tab = t;
            s.length = n;
            s.index = i;
            s.next = stack;
            stack = s;
        }

        /**
         * Possibly pops traversal state.
         *
         * @param n length of current table
         */
        private void recoverState(int n) {
            TableStack<K,V> s; int len;
            // Pop each saved frame whose forwarded region is exhausted,
            // recycling popped frames onto the spare list.
            while ((s = stack) != null && (index += (len = s.length)) >= n) {
                n = len;
                index = s.index;
                tab = s.tab;
                s.tab = null;
                TableStack<K,V> next = s.next;
                s.next = spare; // save for reuse
                stack = next;
                spare = s;
            }
            if (s == null && (index += baseSize) >= n)
                index = ++baseIndex;
        }
    }
3405    
3406     /**
3407     * Base of key, value, and entry Iterators. Adds fields to
3408 jsr166 1.229 * Traverser to support iterator.remove.
3409 dl 1.222 */
3410     static class BaseIterator<K,V> extends Traverser<K,V> {
3411     final ConcurrentHashMap<K,V> map;
3412     Node<K,V> lastReturned;
3413     BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
3414     ConcurrentHashMap<K,V> map) {
3415 dl 1.210 super(tab, size, index, limit);
3416     this.map = map;
3417 dl 1.222 advance();
3418 dl 1.210 }
3419    
3420 dl 1.222 public final boolean hasNext() { return next != null; }
3421     public final boolean hasMoreElements() { return next != null; }
3422    
3423     public final void remove() {
3424     Node<K,V> p;
3425     if ((p = lastReturned) == null)
3426     throw new IllegalStateException();
3427     lastReturned = null;
3428     map.replaceNode(p.key, null, null);
3429 dl 1.210 }
3430 dl 1.222 }
3431 dl 1.210
3432 dl 1.222 static final class KeyIterator<K,V> extends BaseIterator<K,V>
3433     implements Iterator<K>, Enumeration<K> {
3434     KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
3435     ConcurrentHashMap<K,V> map) {
3436     super(tab, index, size, limit, map);
3437 dl 1.210 }
3438    
3439 dl 1.222 public final K next() {
3440 dl 1.210 Node<K,V> p;
3441 dl 1.222 if ((p = next) == null)
3442     throw new NoSuchElementException();
3443     K k = p.key;
3444     lastReturned = p;
3445     advance();
3446     return k;
3447 dl 1.210 }
3448    
3449 dl 1.222 public final K nextElement() { return next(); }
3450     }
3451    
3452     static final class ValueIterator<K,V> extends BaseIterator<K,V>
3453     implements Iterator<V>, Enumeration<V> {
3454     ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
3455     ConcurrentHashMap<K,V> map) {
3456     super(tab, index, size, limit, map);
3457     }
3458 dl 1.210
3459 dl 1.222 public final V next() {
3460     Node<K,V> p;
3461     if ((p = next) == null)
3462     throw new NoSuchElementException();
3463     V v = p.val;
3464     lastReturned = p;
3465     advance();
3466     return v;
3467 dl 1.210 }
3468 dl 1.222
3469     public final V nextElement() { return next(); }
3470 dl 1.210 }
3471    
3472 dl 1.222 static final class EntryIterator<K,V> extends BaseIterator<K,V>
3473     implements Iterator<Map.Entry<K,V>> {
3474     EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
3475     ConcurrentHashMap<K,V> map) {
3476     super(tab, index, size, limit, map);
3477     }
3478 dl 1.210
3479 dl 1.222 public final Map.Entry<K,V> next() {
3480     Node<K,V> p;
3481     if ((p = next) == null)
3482     throw new NoSuchElementException();
3483     K k = p.key;
3484     V v = p.val;
3485     lastReturned = p;
3486     advance();
3487     return new MapEntry<K,V>(k, v, map);
3488     }
3489     }
3490 dl 1.119
3491     /**
3492 jsr166 1.285 * Exported Entry for EntryIterator.
3493 dl 1.119 */
3494 dl 1.222 static final class MapEntry<K,V> implements Map.Entry<K,V> {
3495     final K key; // non-null
3496     V val; // non-null
3497     final ConcurrentHashMap<K,V> map;
3498     MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
3499     this.key = key;
3500     this.val = val;
3501     this.map = map;
3502     }
3503     public K getKey() { return key; }
3504     public V getValue() { return val; }
3505     public int hashCode() { return key.hashCode() ^ val.hashCode(); }
3506 jsr166 1.268 public String toString() {
3507     return Helpers.mapEntryToString(key, val);
3508     }
3509 dl 1.119
3510 dl 1.222 public boolean equals(Object o) {
3511     Object k, v; Map.Entry<?,?> e;
3512     return ((o instanceof Map.Entry) &&
3513     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3514     (v = e.getValue()) != null &&
3515     (k == key || k.equals(key)) &&
3516     (v == val || v.equals(val)));
3517     }
3518 dl 1.119
3519 dl 1.222 /**
3520     * Sets our entry's value and writes through to the map. The
3521     * value to return is somewhat arbitrary here. Since we do not
3522     * necessarily track asynchronous changes, the most recent
3523     * "previous" value could be different from what we return (or
3524     * could even have been removed, in which case the put will
3525     * re-establish). We do not and cannot guarantee more.
3526     */
3527     public V setValue(V value) {
3528     if (value == null) throw new NullPointerException();
3529     V v = val;
3530     val = value;
3531     map.put(key, value);
3532     return v;
3533     }
3534 dl 1.119 }
3535    
3536 dl 1.222 static final class KeySpliterator<K,V> extends Traverser<K,V>
3537     implements Spliterator<K> {
3538     long est; // size estimate
3539     KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3540     long est) {
3541     super(tab, size, index, limit);
3542     this.est = est;
3543     }
3544 dl 1.119
3545 jsr166 1.286 public KeySpliterator<K,V> trySplit() {
3546 dl 1.222 int i, f, h;
3547     return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3548     new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
3549     f, est >>>= 1);
3550 dl 1.119 }
3551    
3552 dl 1.222 public void forEachRemaining(Consumer<? super K> action) {
3553     if (action == null) throw new NullPointerException();
3554     for (Node<K,V> p; (p = advance()) != null;)
3555     action.accept(p.key);
3556 dl 1.119 }
3557    
3558 dl 1.222 public boolean tryAdvance(Consumer<? super K> action) {
3559     if (action == null) throw new NullPointerException();
3560     Node<K,V> p;
3561     if ((p = advance()) == null)
3562 dl 1.119 return false;
3563 dl 1.222 action.accept(p.key);
3564     return true;
3565 dl 1.119 }
3566    
3567 dl 1.222 public long estimateSize() { return est; }
3568 dl 1.119
3569 dl 1.222 public int characteristics() {
3570     return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3571     Spliterator.NONNULL;
3572     }
3573 dl 1.142 }
3574 dl 1.119
3575 dl 1.222 static final class ValueSpliterator<K,V> extends Traverser<K,V>
3576     implements Spliterator<V> {
3577     long est; // size estimate
3578     ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
3579     long est) {
3580     super(tab, size, index, limit);
3581     this.est = est;
3582 dl 1.209 }
3583    
3584 jsr166 1.286 public ValueSpliterator<K,V> trySplit() {
3585 dl 1.222 int i, f, h;
3586     return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3587     new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
3588     f, est >>>= 1);
3589 dl 1.142 }
3590 dl 1.119
3591 dl 1.222 public void forEachRemaining(Consumer<? super V> action) {
3592     if (action == null) throw new NullPointerException();
3593     for (Node<K,V> p; (p = advance()) != null;)
3594     action.accept(p.val);
3595     }
3596 dl 1.119
3597 dl 1.222 public boolean tryAdvance(Consumer<? super V> action) {
3598     if (action == null) throw new NullPointerException();
3599     Node<K,V> p;
3600     if ((p = advance()) == null)
3601     return false;
3602     action.accept(p.val);
3603     return true;
3604 dl 1.119 }
3605 dl 1.222
3606     public long estimateSize() { return est; }
3607    
3608     public int characteristics() {
3609     return Spliterator.CONCURRENT | Spliterator.NONNULL;
3610 dl 1.119 }
3611 dl 1.142 }
3612 dl 1.119
3613 dl 1.222 static final class EntrySpliterator<K,V> extends Traverser<K,V>
3614     implements Spliterator<Map.Entry<K,V>> {
3615     final ConcurrentHashMap<K,V> map; // To export MapEntry
3616     long est; // size estimate
3617     EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3618     long est, ConcurrentHashMap<K,V> map) {
3619     super(tab, size, index, limit);
3620     this.map = map;
3621     this.est = est;
3622     }
3623    
3624 jsr166 1.286 public EntrySpliterator<K,V> trySplit() {
3625 dl 1.222 int i, f, h;
3626     return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3627     new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
3628     f, est >>>= 1, map);
3629     }
3630 dl 1.142
3631 dl 1.222 public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
3632     if (action == null) throw new NullPointerException();
3633     for (Node<K,V> p; (p = advance()) != null; )
3634     action.accept(new MapEntry<K,V>(p.key, p.val, map));
3635     }
3636 dl 1.210
3637 dl 1.222 public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
3638     if (action == null) throw new NullPointerException();
3639     Node<K,V> p;
3640     if ((p = advance()) == null)
3641     return false;
3642     action.accept(new MapEntry<K,V>(p.key, p.val, map));
3643     return true;
3644 dl 1.210 }
3645    
3646 dl 1.222 public long estimateSize() { return est; }
3647    
3648     public int characteristics() {
3649     return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3650     Spliterator.NONNULL;
3651 dl 1.210 }
3652     }
3653    
3654     // Parallel bulk operations
3655    
3656     /**
3657     * Computes initial batch value for bulk tasks. The returned value
3658     * is approximately exp2 of the number of times (minus one) to
3659     * split task by two before executing leaf action. This value is
3660     * faster to compute and more convenient to use as a guide to
3661     * splitting than is the depth, since it is used while dividing by
3662     * two anyway.
3663     */
3664     final int batchFor(long b) {
3665     long n;
3666     if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3667     return 0;
3668     int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3669     return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3670     }
3671 dl 1.151
3672 dl 1.119 /**
3673 dl 1.137 * Performs the given action for each (key, value).
3674 dl 1.119 *
3675 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
3676 jsr166 1.213 * needed for this operation to be executed in parallel
3677 dl 1.137 * @param action the action
3678 jsr166 1.220 * @since 1.8
3679 dl 1.119 */
3680 dl 1.210 public void forEach(long parallelismThreshold,
3681     BiConsumer<? super K,? super V> action) {
3682 dl 1.151 if (action == null) throw new NullPointerException();
3683 dl 1.210 new ForEachMappingTask<K,V>
3684     (null, batchFor(parallelismThreshold), 0, 0, table,
3685     action).invoke();
3686 dl 1.119 }
3687    
3688     /**
3689 dl 1.137 * Performs the given action for each non-null transformation
3690     * of each (key, value).
3691     *
3692 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
3693 jsr166 1.213 * needed for this operation to be executed in parallel
3694 dl 1.137 * @param transformer a function returning the transformation
3695 jsr166 1.169 * for an element, or null if there is no transformation (in
3696 jsr166 1.172 * which case the action is not applied)
3697 dl 1.137 * @param action the action
3698 jsr166 1.237 * @param <U> the return type of the transformer
3699 jsr166 1.220 * @since 1.8
3700 dl 1.119 */
3701 dl 1.210 public <U> void forEach(long parallelismThreshold,
3702     BiFunction<? super K, ? super V, ? extends U> transformer,
3703     Consumer<? super U> action) {
3704 dl 1.151 if (transformer == null || action == null)
3705     throw new NullPointerException();
3706 dl 1.210 new ForEachTransformedMappingTask<K,V,U>
3707     (null, batchFor(parallelismThreshold), 0, 0, table,
3708     transformer, action).invoke();
3709 dl 1.137 }
3710    
3711     /**
3712     * Returns a non-null result from applying the given search
3713 dl 1.210 * function on each (key, value), or null if none. Upon
3714     * success, further element processing is suppressed and the
3715     * results of any other parallel invocations of the search
3716     * function are ignored.
3717 dl 1.137 *
3718 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
3719 jsr166 1.213 * needed for this operation to be executed in parallel
3720 dl 1.137 * @param searchFunction a function returning a non-null
3721     * result on success, else null
3722 jsr166 1.237 * @param <U> the return type of the search function
3723 dl 1.137 * @return a non-null result from applying the given search
3724     * function on each (key, value), or null if none
3725 jsr166 1.220 * @since 1.8
3726 dl 1.137 */
3727 dl 1.210 public <U> U search(long parallelismThreshold,
3728     BiFunction<? super K, ? super V, ? extends U> searchFunction) {
3729 dl 1.151 if (searchFunction == null) throw new NullPointerException();
3730 dl 1.210 return new SearchMappingsTask<K,V,U>
3731     (null, batchFor(parallelismThreshold), 0, 0, table,
3732     searchFunction, new AtomicReference<U>()).invoke();
3733 dl 1.137 }
3734    
3735     /**
3736     * Returns the result of accumulating the given transformation
3737     * of all (key, value) pairs using the given reducer to
3738     * combine values, or null if none.
3739     *
3740 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
3741 jsr166 1.213 * needed for this operation to be executed in parallel
3742 dl 1.137 * @param transformer a function returning the transformation
3743 jsr166 1.169 * for an element, or null if there is no transformation (in
3744 jsr166 1.172 * which case it is not combined)
3745 dl 1.137 * @param reducer a commutative associative combining function
3746 jsr166 1.237 * @param <U> the return type of the transformer
3747 dl 1.137 * @return the result of accumulating the given transformation
3748     * of all (key, value) pairs
3749 jsr166 1.220 * @since 1.8
3750 dl 1.137 */
3751 dl 1.210 public <U> U reduce(long parallelismThreshold,
3752     BiFunction<? super K, ? super V, ? extends U> transformer,
3753     BiFunction<? super U, ? super U, ? extends U> reducer) {
3754 dl 1.151 if (transformer == null || reducer == null)
3755     throw new NullPointerException();
3756 dl 1.210 return new MapReduceMappingsTask<K,V,U>
3757     (null, batchFor(parallelismThreshold), 0, 0, table,
3758     null, transformer, reducer).invoke();
3759 dl 1.137 }
3760    
3761     /**
3762     * Returns the result of accumulating the given transformation
3763     * of all (key, value) pairs using the given reducer to
3764     * combine values, and the given basis as an identity value.
3765     *
3766 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
3767 jsr166 1.213 * needed for this operation to be executed in parallel
3768 dl 1.137 * @param transformer a function returning the transformation
3769     * for an element
3770     * @param basis the identity (initial default value) for the reduction
3771     * @param reducer a commutative associative combining function
3772     * @return the result of accumulating the given transformation
3773     * of all (key, value) pairs
3774 jsr166 1.220 * @since 1.8
3775 dl 1.137 */
3776 dl 1.231 public double reduceToDouble(long parallelismThreshold,
3777     ToDoubleBiFunction<? super K, ? super V> transformer,
3778     double basis,
3779     DoubleBinaryOperator reducer) {
3780 dl 1.151 if (transformer == null || reducer == null)
3781     throw new NullPointerException();
3782 dl 1.210 return new MapReduceMappingsToDoubleTask<K,V>
3783     (null, batchFor(parallelismThreshold), 0, 0, table,
3784     null, transformer, basis, reducer).invoke();
3785 dl 1.137 }
3786 dl 1.119
3787 dl 1.137 /**
3788     * Returns the result of accumulating the given transformation
3789     * of all (key, value) pairs using the given reducer to
3790     * combine values, and the given basis as an identity value.
3791     *
3792 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
3793 jsr166 1.213 * needed for this operation to be executed in parallel
3794 dl 1.137 * @param transformer a function returning the transformation
3795     * for an element
3796     * @param basis the identity (initial default value) for the reduction
3797     * @param reducer a commutative associative combining function
3798     * @return the result of accumulating the given transformation
3799     * of all (key, value) pairs
3800 jsr166 1.220 * @since 1.8
3801 dl 1.137 */
3802 dl 1.210 public long reduceToLong(long parallelismThreshold,
3803     ToLongBiFunction<? super K, ? super V> transformer,
3804     long basis,
3805     LongBinaryOperator reducer) {
3806 dl 1.151 if (transformer == null || reducer == null)
3807     throw new NullPointerException();
3808 dl 1.210 return new MapReduceMappingsToLongTask<K,V>
3809     (null, batchFor(parallelismThreshold), 0, 0, table,
3810     null, transformer, basis, reducer).invoke();
3811 dl 1.137 }
3812    
3813     /**
3814     * Returns the result of accumulating the given transformation
3815     * of all (key, value) pairs using the given reducer to
3816     * combine values, and the given basis as an identity value.
3817     *
3818 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
3819 jsr166 1.213 * needed for this operation to be executed in parallel
3820 dl 1.137 * @param transformer a function returning the transformation
3821     * for an element
3822     * @param basis the identity (initial default value) for the reduction
3823     * @param reducer a commutative associative combining function
3824     * @return the result of accumulating the given transformation
3825     * of all (key, value) pairs
3826 jsr166 1.220 * @since 1.8
3827 dl 1.137 */
3828 dl 1.210 public int reduceToInt(long parallelismThreshold,
3829     ToIntBiFunction<? super K, ? super V> transformer,
3830     int basis,
3831     IntBinaryOperator reducer) {
3832 dl 1.151 if (transformer == null || reducer == null)
3833     throw new NullPointerException();
3834 dl 1.210 return new MapReduceMappingsToIntTask<K,V>
3835     (null, batchFor(parallelismThreshold), 0, 0, table,
3836     null, transformer, basis, reducer).invoke();
3837 dl 1.137 }
3838    
3839     /**
3840     * Performs the given action for each key.
3841     *
3842 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
3843 jsr166 1.213 * needed for this operation to be executed in parallel
3844 dl 1.137 * @param action the action
3845 jsr166 1.220 * @since 1.8
3846 dl 1.137 */
3847 dl 1.210 public void forEachKey(long parallelismThreshold,
3848     Consumer<? super K> action) {
3849     if (action == null) throw new NullPointerException();
3850     new ForEachKeyTask<K,V>
3851     (null, batchFor(parallelismThreshold), 0, 0, table,
3852     action).invoke();
3853 dl 1.137 }
3854 dl 1.119
3855 dl 1.137 /**
3856     * Performs the given action for each non-null transformation
3857     * of each key.
3858     *
3859 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
3860 jsr166 1.213 * needed for this operation to be executed in parallel
3861 dl 1.137 * @param transformer a function returning the transformation
3862 jsr166 1.169 * for an element, or null if there is no transformation (in
3863 jsr166 1.172 * which case the action is not applied)
3864 dl 1.137 * @param action the action
3865 jsr166 1.237 * @param <U> the return type of the transformer
3866 jsr166 1.220 * @since 1.8
3867 dl 1.137 */
3868 dl 1.210 public <U> void forEachKey(long parallelismThreshold,
3869     Function<? super K, ? extends U> transformer,
3870     Consumer<? super U> action) {
3871 dl 1.151 if (transformer == null || action == null)
3872     throw new NullPointerException();
3873 dl 1.210 new ForEachTransformedKeyTask<K,V,U>
3874     (null, batchFor(parallelismThreshold), 0, 0, table,
3875     transformer, action).invoke();
3876 dl 1.137 }
3877 dl 1.119
3878 dl 1.137 /**
3879     * Returns a non-null result from applying the given search
3880 dl 1.210 * function on each key, or null if none. Upon success,
3881     * further element processing is suppressed and the results of
3882     * any other parallel invocations of the search function are
3883     * ignored.
3884 dl 1.137 *
3885 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
3886 jsr166 1.213 * needed for this operation to be executed in parallel
3887 dl 1.137 * @param searchFunction a function returning a non-null
3888     * result on success, else null
3889 jsr166 1.237 * @param <U> the return type of the search function
3890 dl 1.137 * @return a non-null result from applying the given search
3891     * function on each key, or null if none
3892 jsr166 1.220 * @since 1.8
3893 dl 1.137 */
3894 dl 1.210 public <U> U searchKeys(long parallelismThreshold,
3895     Function<? super K, ? extends U> searchFunction) {
3896     if (searchFunction == null) throw new NullPointerException();
3897     return new SearchKeysTask<K,V,U>
3898     (null, batchFor(parallelismThreshold), 0, 0, table,
3899     searchFunction, new AtomicReference<U>()).invoke();
3900 dl 1.137 }
3901 dl 1.119
3902 dl 1.137 /**
3903     * Returns the result of accumulating all keys using the given
3904     * reducer to combine values, or null if none.
3905     *
3906 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
3907 jsr166 1.213 * needed for this operation to be executed in parallel
3908 dl 1.137 * @param reducer a commutative associative combining function
3909     * @return the result of accumulating all keys using the given
3910     * reducer to combine values, or null if none
3911 jsr166 1.220 * @since 1.8
3912 dl 1.137 */
3913 dl 1.210 public K reduceKeys(long parallelismThreshold,
3914     BiFunction<? super K, ? super K, ? extends K> reducer) {
3915 dl 1.151 if (reducer == null) throw new NullPointerException();
3916 dl 1.210 return new ReduceKeysTask<K,V>
3917     (null, batchFor(parallelismThreshold), 0, 0, table,
3918     null, reducer).invoke();
3919 dl 1.137 }
3920 dl 1.119
3921 dl 1.137 /**
3922     * Returns the result of accumulating the given transformation
3923     * of all keys using the given reducer to combine values, or
3924     * null if none.
3925     *
3926 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
3927 jsr166 1.213 * needed for this operation to be executed in parallel
3928 dl 1.137 * @param transformer a function returning the transformation
3929 jsr166 1.169 * for an element, or null if there is no transformation (in
3930 jsr166 1.172 * which case it is not combined)
3931 dl 1.137 * @param reducer a commutative associative combining function
3932 jsr166 1.237 * @param <U> the return type of the transformer
3933 dl 1.137 * @return the result of accumulating the given transformation
3934     * of all keys
3935 jsr166 1.220 * @since 1.8
3936 dl 1.137 */
3937 dl 1.210 public <U> U reduceKeys(long parallelismThreshold,
3938     Function<? super K, ? extends U> transformer,
3939 dl 1.153 BiFunction<? super U, ? super U, ? extends U> reducer) {
3940 dl 1.151 if (transformer == null || reducer == null)
3941     throw new NullPointerException();
3942 dl 1.210 return new MapReduceKeysTask<K,V,U>
3943     (null, batchFor(parallelismThreshold), 0, 0, table,
3944     null, transformer, reducer).invoke();
3945 dl 1.137 }
3946 dl 1.119
3947 dl 1.137 /**
3948     * Returns the result of accumulating the given transformation
3949     * of all keys using the given reducer to combine values, and
3950     * the given basis as an identity value.
3951     *
3952 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
3953 jsr166 1.213 * needed for this operation to be executed in parallel
3954 dl 1.137 * @param transformer a function returning the transformation
3955     * for an element
3956     * @param basis the identity (initial default value) for the reduction
3957     * @param reducer a commutative associative combining function
3958 jsr166 1.157 * @return the result of accumulating the given transformation
3959 dl 1.137 * of all keys
3960 jsr166 1.220 * @since 1.8
3961 dl 1.137 */
3962 dl 1.210 public double reduceKeysToDouble(long parallelismThreshold,
3963     ToDoubleFunction<? super K> transformer,
3964     double basis,
3965     DoubleBinaryOperator reducer) {
3966 dl 1.151 if (transformer == null || reducer == null)
3967     throw new NullPointerException();
3968 dl 1.210 return new MapReduceKeysToDoubleTask<K,V>
3969     (null, batchFor(parallelismThreshold), 0, 0, table,
3970     null, transformer, basis, reducer).invoke();
3971 dl 1.137 }
3972 dl 1.119
3973 dl 1.137 /**
3974     * Returns the result of accumulating the given transformation
3975     * of all keys using the given reducer to combine values, and
3976     * the given basis as an identity value.
3977     *
3978 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
3979 jsr166 1.213 * needed for this operation to be executed in parallel
3980 dl 1.137 * @param transformer a function returning the transformation
3981     * for an element
3982     * @param basis the identity (initial default value) for the reduction
3983     * @param reducer a commutative associative combining function
3984     * @return the result of accumulating the given transformation
3985     * of all keys
3986 jsr166 1.220 * @since 1.8
3987 dl 1.137 */
3988 dl 1.210 public long reduceKeysToLong(long parallelismThreshold,
3989     ToLongFunction<? super K> transformer,
3990     long basis,
3991     LongBinaryOperator reducer) {
3992 dl 1.151 if (transformer == null || reducer == null)
3993     throw new NullPointerException();
3994 dl 1.210 return new MapReduceKeysToLongTask<K,V>
3995     (null, batchFor(parallelismThreshold), 0, 0, table,
3996     null, transformer, basis, reducer).invoke();
3997 dl 1.137 }
3998 dl 1.119
3999 dl 1.137 /**
4000     * Returns the result of accumulating the given transformation
4001     * of all keys using the given reducer to combine values, and
4002     * the given basis as an identity value.
4003     *
4004 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
4005 jsr166 1.213 * needed for this operation to be executed in parallel
4006 dl 1.137 * @param transformer a function returning the transformation
4007     * for an element
4008     * @param basis the identity (initial default value) for the reduction
4009     * @param reducer a commutative associative combining function
4010     * @return the result of accumulating the given transformation
4011     * of all keys
4012 jsr166 1.220 * @since 1.8
4013 dl 1.137 */
4014 dl 1.210 public int reduceKeysToInt(long parallelismThreshold,
4015     ToIntFunction<? super K> transformer,
4016     int basis,
4017     IntBinaryOperator reducer) {
4018 dl 1.151 if (transformer == null || reducer == null)
4019     throw new NullPointerException();
4020 dl 1.210 return new MapReduceKeysToIntTask<K,V>
4021     (null, batchFor(parallelismThreshold), 0, 0, table,
4022     null, transformer, basis, reducer).invoke();
4023 dl 1.137 }
4024 dl 1.119
4025 dl 1.137 /**
4026     * Performs the given action for each value.
4027     *
4028 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
4029 jsr166 1.213 * needed for this operation to be executed in parallel
4030 dl 1.137 * @param action the action
4031 jsr166 1.220 * @since 1.8
4032 dl 1.137 */
4033 dl 1.210 public void forEachValue(long parallelismThreshold,
4034     Consumer<? super V> action) {
4035     if (action == null)
4036     throw new NullPointerException();
4037     new ForEachValueTask<K,V>
4038     (null, batchFor(parallelismThreshold), 0, 0, table,
4039     action).invoke();
4040 dl 1.137 }
4041 dl 1.119
4042 dl 1.137 /**
4043     * Performs the given action for each non-null transformation
4044     * of each value.
4045     *
4046 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
4047 jsr166 1.213 * needed for this operation to be executed in parallel
4048 dl 1.137 * @param transformer a function returning the transformation
4049 jsr166 1.169 * for an element, or null if there is no transformation (in
4050 jsr166 1.172 * which case the action is not applied)
4051 jsr166 1.179 * @param action the action
4052 jsr166 1.237 * @param <U> the return type of the transformer
4053 jsr166 1.220 * @since 1.8
4054 dl 1.137 */
4055 dl 1.210 public <U> void forEachValue(long parallelismThreshold,
4056     Function<? super V, ? extends U> transformer,
4057     Consumer<? super U> action) {
4058 dl 1.151 if (transformer == null || action == null)
4059     throw new NullPointerException();
4060 dl 1.210 new ForEachTransformedValueTask<K,V,U>
4061     (null, batchFor(parallelismThreshold), 0, 0, table,
4062     transformer, action).invoke();
4063 dl 1.137 }
4064 dl 1.119
4065 dl 1.137 /**
4066     * Returns a non-null result from applying the given search
4067 dl 1.210 * function on each value, or null if none. Upon success,
4068     * further element processing is suppressed and the results of
4069     * any other parallel invocations of the search function are
4070     * ignored.
4071 dl 1.137 *
4072 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
4073 jsr166 1.213 * needed for this operation to be executed in parallel
4074 dl 1.137 * @param searchFunction a function returning a non-null
4075     * result on success, else null
4076 jsr166 1.237 * @param <U> the return type of the search function
4077 dl 1.137 * @return a non-null result from applying the given search
4078     * function on each value, or null if none
4079 jsr166 1.220 * @since 1.8
4080 dl 1.137 */
4081 dl 1.210 public <U> U searchValues(long parallelismThreshold,
4082     Function<? super V, ? extends U> searchFunction) {
4083 dl 1.151 if (searchFunction == null) throw new NullPointerException();
4084 dl 1.210 return new SearchValuesTask<K,V,U>
4085     (null, batchFor(parallelismThreshold), 0, 0, table,
4086     searchFunction, new AtomicReference<U>()).invoke();
4087 dl 1.137 }
4088 dl 1.119
4089 dl 1.137 /**
4090     * Returns the result of accumulating all values using the
4091     * given reducer to combine values, or null if none.
4092     *
4093 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
4094 jsr166 1.213 * needed for this operation to be executed in parallel
4095 dl 1.137 * @param reducer a commutative associative combining function
4096 jsr166 1.157 * @return the result of accumulating all values
4097 jsr166 1.220 * @since 1.8
4098 dl 1.137 */
4099 dl 1.210 public V reduceValues(long parallelismThreshold,
4100     BiFunction<? super V, ? super V, ? extends V> reducer) {
4101 dl 1.151 if (reducer == null) throw new NullPointerException();
4102 dl 1.210 return new ReduceValuesTask<K,V>
4103     (null, batchFor(parallelismThreshold), 0, 0, table,
4104     null, reducer).invoke();
4105 dl 1.137 }
4106 dl 1.119
4107 dl 1.137 /**
4108     * Returns the result of accumulating the given transformation
4109     * of all values using the given reducer to combine values, or
4110     * null if none.
4111     *
4112 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
4113 jsr166 1.213 * needed for this operation to be executed in parallel
4114 dl 1.137 * @param transformer a function returning the transformation
4115 jsr166 1.169 * for an element, or null if there is no transformation (in
4116 jsr166 1.172 * which case it is not combined)
4117 dl 1.137 * @param reducer a commutative associative combining function
4118 jsr166 1.237 * @param <U> the return type of the transformer
4119 dl 1.137 * @return the result of accumulating the given transformation
4120     * of all values
4121 jsr166 1.220 * @since 1.8
4122 dl 1.137 */
4123 dl 1.210 public <U> U reduceValues(long parallelismThreshold,
4124     Function<? super V, ? extends U> transformer,
4125     BiFunction<? super U, ? super U, ? extends U> reducer) {
4126 dl 1.151 if (transformer == null || reducer == null)
4127     throw new NullPointerException();
4128 dl 1.210 return new MapReduceValuesTask<K,V,U>
4129     (null, batchFor(parallelismThreshold), 0, 0, table,
4130     null, transformer, reducer).invoke();
4131 dl 1.137 }
4132 dl 1.119
4133 dl 1.137 /**
4134     * Returns the result of accumulating the given transformation
4135     * of all values using the given reducer to combine values,
4136     * and the given basis as an identity value.
4137     *
4138 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
4139 jsr166 1.213 * needed for this operation to be executed in parallel
4140 dl 1.137 * @param transformer a function returning the transformation
4141     * for an element
4142     * @param basis the identity (initial default value) for the reduction
4143     * @param reducer a commutative associative combining function
4144     * @return the result of accumulating the given transformation
4145     * of all values
4146 jsr166 1.220 * @since 1.8
4147 dl 1.137 */
4148 dl 1.210 public double reduceValuesToDouble(long parallelismThreshold,
4149     ToDoubleFunction<? super V> transformer,
4150     double basis,
4151     DoubleBinaryOperator reducer) {
4152 dl 1.151 if (transformer == null || reducer == null)
4153     throw new NullPointerException();
4154 dl 1.210 return new MapReduceValuesToDoubleTask<K,V>
4155     (null, batchFor(parallelismThreshold), 0, 0, table,
4156     null, transformer, basis, reducer).invoke();
4157 dl 1.137 }
4158 dl 1.119
4159 dl 1.137 /**
4160     * Returns the result of accumulating the given transformation
4161     * of all values using the given reducer to combine values,
4162     * and the given basis as an identity value.
4163     *
4164 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
4165 jsr166 1.213 * needed for this operation to be executed in parallel
4166 dl 1.137 * @param transformer a function returning the transformation
4167     * for an element
4168     * @param basis the identity (initial default value) for the reduction
4169     * @param reducer a commutative associative combining function
4170     * @return the result of accumulating the given transformation
4171     * of all values
4172 jsr166 1.220 * @since 1.8
4173 dl 1.137 */
4174 dl 1.210 public long reduceValuesToLong(long parallelismThreshold,
4175     ToLongFunction<? super V> transformer,
4176     long basis,
4177     LongBinaryOperator reducer) {
4178 dl 1.151 if (transformer == null || reducer == null)
4179     throw new NullPointerException();
4180 dl 1.210 return new MapReduceValuesToLongTask<K,V>
4181     (null, batchFor(parallelismThreshold), 0, 0, table,
4182     null, transformer, basis, reducer).invoke();
4183 dl 1.137 }
4184 dl 1.119
4185 dl 1.137 /**
4186     * Returns the result of accumulating the given transformation
4187     * of all values using the given reducer to combine values,
4188     * and the given basis as an identity value.
4189     *
4190 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
4191 jsr166 1.213 * needed for this operation to be executed in parallel
4192 dl 1.137 * @param transformer a function returning the transformation
4193     * for an element
4194     * @param basis the identity (initial default value) for the reduction
4195     * @param reducer a commutative associative combining function
4196     * @return the result of accumulating the given transformation
4197     * of all values
4198 jsr166 1.220 * @since 1.8
4199 dl 1.137 */
4200 dl 1.210 public int reduceValuesToInt(long parallelismThreshold,
4201     ToIntFunction<? super V> transformer,
4202     int basis,
4203     IntBinaryOperator reducer) {
4204 dl 1.151 if (transformer == null || reducer == null)
4205     throw new NullPointerException();
4206 dl 1.210 return new MapReduceValuesToIntTask<K,V>
4207     (null, batchFor(parallelismThreshold), 0, 0, table,
4208     null, transformer, basis, reducer).invoke();
4209 dl 1.137 }
4210 dl 1.119
4211 dl 1.137 /**
4212     * Performs the given action for each entry.
4213     *
4214 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
4215 jsr166 1.213 * needed for this operation to be executed in parallel
4216 dl 1.137 * @param action the action
4217 jsr166 1.220 * @since 1.8
4218 dl 1.137 */
4219 dl 1.210 public void forEachEntry(long parallelismThreshold,
4220     Consumer<? super Map.Entry<K,V>> action) {
4221 dl 1.151 if (action == null) throw new NullPointerException();
4222 dl 1.210 new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
4223     action).invoke();
4224 dl 1.137 }
4225 dl 1.119
4226 dl 1.137 /**
4227     * Performs the given action for each non-null transformation
4228     * of each entry.
4229     *
4230 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
4231 jsr166 1.213 * needed for this operation to be executed in parallel
4232 dl 1.137 * @param transformer a function returning the transformation
4233 jsr166 1.169 * for an element, or null if there is no transformation (in
4234 jsr166 1.172 * which case the action is not applied)
4235 dl 1.137 * @param action the action
4236 jsr166 1.237 * @param <U> the return type of the transformer
4237 jsr166 1.220 * @since 1.8
4238 dl 1.137 */
4239 dl 1.210 public <U> void forEachEntry(long parallelismThreshold,
4240     Function<Map.Entry<K,V>, ? extends U> transformer,
4241     Consumer<? super U> action) {
4242 dl 1.151 if (transformer == null || action == null)
4243     throw new NullPointerException();
4244 dl 1.210 new ForEachTransformedEntryTask<K,V,U>
4245     (null, batchFor(parallelismThreshold), 0, 0, table,
4246     transformer, action).invoke();
4247 dl 1.137 }
4248 dl 1.119
4249 dl 1.137 /**
4250     * Returns a non-null result from applying the given search
4251 dl 1.210 * function on each entry, or null if none. Upon success,
4252     * further element processing is suppressed and the results of
4253     * any other parallel invocations of the search function are
4254     * ignored.
4255 dl 1.137 *
4256 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
4257 jsr166 1.213 * needed for this operation to be executed in parallel
4258 dl 1.137 * @param searchFunction a function returning a non-null
4259     * result on success, else null
4260 jsr166 1.237 * @param <U> the return type of the search function
4261 dl 1.137 * @return a non-null result from applying the given search
4262     * function on each entry, or null if none
4263 jsr166 1.220 * @since 1.8
4264 dl 1.137 */
4265 dl 1.210 public <U> U searchEntries(long parallelismThreshold,
4266     Function<Map.Entry<K,V>, ? extends U> searchFunction) {
4267 dl 1.151 if (searchFunction == null) throw new NullPointerException();
4268 dl 1.210 return new SearchEntriesTask<K,V,U>
4269     (null, batchFor(parallelismThreshold), 0, 0, table,
4270     searchFunction, new AtomicReference<U>()).invoke();
4271 dl 1.137 }
4272 dl 1.119
4273 dl 1.137 /**
4274     * Returns the result of accumulating all entries using the
4275     * given reducer to combine values, or null if none.
4276     *
4277 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
4278 jsr166 1.213 * needed for this operation to be executed in parallel
4279 dl 1.137 * @param reducer a commutative associative combining function
4280     * @return the result of accumulating all entries
4281 jsr166 1.220 * @since 1.8
4282 dl 1.137 */
4283 dl 1.210 public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4284     BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4285 dl 1.151 if (reducer == null) throw new NullPointerException();
4286 dl 1.210 return new ReduceEntriesTask<K,V>
4287     (null, batchFor(parallelismThreshold), 0, 0, table,
4288     null, reducer).invoke();
4289 dl 1.137 }
4290 dl 1.119
4291 dl 1.137 /**
4292     * Returns the result of accumulating the given transformation
4293     * of all entries using the given reducer to combine values,
4294     * or null if none.
4295     *
4296 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
4297 jsr166 1.213 * needed for this operation to be executed in parallel
4298 dl 1.137 * @param transformer a function returning the transformation
4299 jsr166 1.169 * for an element, or null if there is no transformation (in
4300 jsr166 1.172 * which case it is not combined)
4301 dl 1.137 * @param reducer a commutative associative combining function
4302 jsr166 1.237 * @param <U> the return type of the transformer
4303 dl 1.137 * @return the result of accumulating the given transformation
4304     * of all entries
4305 jsr166 1.220 * @since 1.8
4306 dl 1.137 */
4307 dl 1.210 public <U> U reduceEntries(long parallelismThreshold,
4308     Function<Map.Entry<K,V>, ? extends U> transformer,
4309     BiFunction<? super U, ? super U, ? extends U> reducer) {
4310 dl 1.151 if (transformer == null || reducer == null)
4311     throw new NullPointerException();
4312 dl 1.210 return new MapReduceEntriesTask<K,V,U>
4313     (null, batchFor(parallelismThreshold), 0, 0, table,
4314     null, transformer, reducer).invoke();
4315 dl 1.137 }
4316 dl 1.119
4317 dl 1.137 /**
4318     * Returns the result of accumulating the given transformation
4319     * of all entries using the given reducer to combine values,
4320     * and the given basis as an identity value.
4321     *
4322 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
4323 jsr166 1.213 * needed for this operation to be executed in parallel
4324 dl 1.137 * @param transformer a function returning the transformation
4325     * for an element
4326     * @param basis the identity (initial default value) for the reduction
4327     * @param reducer a commutative associative combining function
4328     * @return the result of accumulating the given transformation
4329     * of all entries
4330 jsr166 1.220 * @since 1.8
4331 dl 1.137 */
4332 dl 1.210 public double reduceEntriesToDouble(long parallelismThreshold,
4333     ToDoubleFunction<Map.Entry<K,V>> transformer,
4334     double basis,
4335     DoubleBinaryOperator reducer) {
4336 dl 1.151 if (transformer == null || reducer == null)
4337     throw new NullPointerException();
4338 dl 1.210 return new MapReduceEntriesToDoubleTask<K,V>
4339     (null, batchFor(parallelismThreshold), 0, 0, table,
4340     null, transformer, basis, reducer).invoke();
4341 dl 1.137 }
4342 dl 1.119
4343 dl 1.137 /**
4344     * Returns the result of accumulating the given transformation
4345     * of all entries using the given reducer to combine values,
4346     * and the given basis as an identity value.
4347     *
4348 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
4349 jsr166 1.213 * needed for this operation to be executed in parallel
4350 dl 1.137 * @param transformer a function returning the transformation
4351     * for an element
4352     * @param basis the identity (initial default value) for the reduction
4353     * @param reducer a commutative associative combining function
4354 jsr166 1.157 * @return the result of accumulating the given transformation
4355 dl 1.137 * of all entries
4356 jsr166 1.221 * @since 1.8
4357 dl 1.137 */
4358 dl 1.210 public long reduceEntriesToLong(long parallelismThreshold,
4359     ToLongFunction<Map.Entry<K,V>> transformer,
4360     long basis,
4361     LongBinaryOperator reducer) {
4362 dl 1.151 if (transformer == null || reducer == null)
4363     throw new NullPointerException();
4364 dl 1.210 return new MapReduceEntriesToLongTask<K,V>
4365     (null, batchFor(parallelismThreshold), 0, 0, table,
4366     null, transformer, basis, reducer).invoke();
4367 dl 1.137 }
4368 dl 1.119
4369 dl 1.137 /**
4370     * Returns the result of accumulating the given transformation
4371     * of all entries using the given reducer to combine values,
4372     * and the given basis as an identity value.
4373     *
4374 dl 1.210 * @param parallelismThreshold the (estimated) number of elements
4375 jsr166 1.213 * needed for this operation to be executed in parallel
4376 dl 1.137 * @param transformer a function returning the transformation
4377     * for an element
4378     * @param basis the identity (initial default value) for the reduction
4379     * @param reducer a commutative associative combining function
4380     * @return the result of accumulating the given transformation
4381     * of all entries
4382 jsr166 1.221 * @since 1.8
4383 dl 1.137 */
4384 dl 1.210 public int reduceEntriesToInt(long parallelismThreshold,
4385     ToIntFunction<Map.Entry<K,V>> transformer,
4386     int basis,
4387     IntBinaryOperator reducer) {
4388 dl 1.151 if (transformer == null || reducer == null)
4389     throw new NullPointerException();
4390 dl 1.210 return new MapReduceEntriesToIntTask<K,V>
4391     (null, batchFor(parallelismThreshold), 0, 0, table,
4392     null, transformer, basis, reducer).invoke();
4393 dl 1.119 }
4394    
4395 dl 1.209
4396 dl 1.210 /* ----------------Views -------------- */
4397 dl 1.142
4398     /**
4399 dl 1.210 * Base class for views.
4400 dl 1.142 */
4401 dl 1.210 abstract static class CollectionView<K,V,E>
4402     implements Collection<E>, java.io.Serializable {
4403     private static final long serialVersionUID = 7249069246763182397L;
4404     final ConcurrentHashMap<K,V> map;
4405     CollectionView(ConcurrentHashMap<K,V> map) { this.map = map; }
4406    
4407     /**
4408     * Returns the map backing this view.
4409     *
4410     * @return the map backing this view
4411     */
4412     public ConcurrentHashMap<K,V> getMap() { return map; }
4413 dl 1.142
4414 dl 1.210 /**
4415     * Removes all of the elements from this view, by removing all
4416     * the mappings from the map backing this view.
4417 jsr166 1.184 */
4418     public final void clear() { map.clear(); }
4419     public final int size() { return map.size(); }
4420     public final boolean isEmpty() { return map.isEmpty(); }
4421 dl 1.151
4422     // implementations below rely on concrete classes supplying these
4423 jsr166 1.184 // abstract methods
4424     /**
4425 jsr166 1.242 * Returns an iterator over the elements in this collection.
4426     *
4427     * <p>The returned iterator is
4428     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
4429     *
4430     * @return an iterator over the elements in this collection
4431 jsr166 1.184 */
4432     public abstract Iterator<E> iterator();
4433 jsr166 1.165 public abstract boolean contains(Object o);
4434     public abstract boolean remove(Object o);
4435 dl 1.151
4436 jsr166 1.284 private static final String OOME_MSG = "Required array size too large";
4437 dl 1.142
4438     public final Object[] toArray() {
4439     long sz = map.mappingCount();
4440 jsr166 1.184 if (sz > MAX_ARRAY_SIZE)
4441 jsr166 1.284 throw new OutOfMemoryError(OOME_MSG);
4442 dl 1.142 int n = (int)sz;
4443     Object[] r = new Object[n];
4444     int i = 0;
4445 jsr166 1.184 for (E e : this) {
4446 dl 1.142 if (i == n) {
4447     if (n >= MAX_ARRAY_SIZE)
4448 jsr166 1.284 throw new OutOfMemoryError(OOME_MSG);
4449 dl 1.142 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4450     n = MAX_ARRAY_SIZE;
4451     else
4452     n += (n >>> 1) + 1;
4453     r = Arrays.copyOf(r, n);
4454     }
4455 jsr166 1.184 r[i++] = e;
4456 dl 1.142 }
4457     return (i == n) ? r : Arrays.copyOf(r, i);
4458     }
4459    
4460 dl 1.222 @SuppressWarnings("unchecked")
4461 jsr166 1.184 public final <T> T[] toArray(T[] a) {
4462 dl 1.142 long sz = map.mappingCount();
4463 jsr166 1.184 if (sz > MAX_ARRAY_SIZE)
4464 jsr166 1.284 throw new OutOfMemoryError(OOME_MSG);
4465 dl 1.142 int m = (int)sz;
4466     T[] r = (a.length >= m) ? a :
4467     (T[])java.lang.reflect.Array
4468     .newInstance(a.getClass().getComponentType(), m);
4469     int n = r.length;
4470     int i = 0;
4471 jsr166 1.184 for (E e : this) {
4472 dl 1.142 if (i == n) {
4473     if (n >= MAX_ARRAY_SIZE)
4474 jsr166 1.284 throw new OutOfMemoryError(OOME_MSG);
4475 dl 1.142 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4476     n = MAX_ARRAY_SIZE;
4477     else
4478     n += (n >>> 1) + 1;
4479     r = Arrays.copyOf(r, n);
4480     }
4481 jsr166 1.184 r[i++] = (T)e;
4482 dl 1.142 }
4483     if (a == r && i < n) {
4484     r[i] = null; // null-terminate
4485     return r;
4486     }
4487     return (i == n) ? r : Arrays.copyOf(r, i);
4488     }
4489    
4490 jsr166 1.184 /**
4491     * Returns a string representation of this collection.
4492     * The string representation consists of the string representations
4493     * of the collection's elements in the order they are returned by
4494     * its iterator, enclosed in square brackets ({@code "[]"}).
4495     * Adjacent elements are separated by the characters {@code ", "}
4496     * (comma and space). Elements are converted to strings as by
4497     * {@link String#valueOf(Object)}.
4498     *
4499     * @return a string representation of this collection
4500     */
4501 dl 1.142 public final String toString() {
4502     StringBuilder sb = new StringBuilder();
4503     sb.append('[');
4504 jsr166 1.184 Iterator<E> it = iterator();
4505 dl 1.142 if (it.hasNext()) {
4506     for (;;) {
4507     Object e = it.next();
4508     sb.append(e == this ? "(this Collection)" : e);
4509     if (!it.hasNext())
4510     break;
4511     sb.append(',').append(' ');
4512     }
4513     }
4514     return sb.append(']').toString();
4515     }
4516    
4517     public final boolean containsAll(Collection<?> c) {
4518     if (c != this) {
4519 jsr166 1.184 for (Object e : c) {
4520 dl 1.142 if (e == null || !contains(e))
4521     return false;
4522     }
4523     }
4524     return true;
4525     }
4526    
4527 jsr166 1.295 public boolean removeAll(Collection<?> c) {
4528 dl 1.251 if (c == null) throw new NullPointerException();
4529 dl 1.142 boolean modified = false;
4530 jsr166 1.295 // Use (c instanceof Set) as a hint that lookup in c is as
4531     // efficient as this view
4532     if (c instanceof Set<?> && c.size() > map.table.length) {
4533     for (Iterator<?> it = iterator(); it.hasNext(); ) {
4534     if (c.contains(it.next())) {
4535     it.remove();
4536     modified = true;
4537     }
4538 dl 1.142 }
4539 jsr166 1.295 } else {
4540     for (Object e : c)
4541     modified |= remove(e);
4542 dl 1.142 }
4543     return modified;
4544     }
4545    
4546     public final boolean retainAll(Collection<?> c) {
4547 dl 1.251 if (c == null) throw new NullPointerException();
4548 dl 1.142 boolean modified = false;
4549 jsr166 1.184 for (Iterator<E> it = iterator(); it.hasNext();) {
4550 dl 1.142 if (!c.contains(it.next())) {
4551     it.remove();
4552     modified = true;
4553     }
4554     }
4555     return modified;
4556     }
4557    
4558     }
4559    
    /**
     * A view of a ConcurrentHashMap as a {@link Set} of keys, in
     * which additions may optionally be enabled by mapping to a
     * common value.  This class cannot be directly instantiated.
     * See {@link #keySet() keySet()},
     * {@link #keySet(Object) keySet(V)},
     * {@link #newKeySet() newKeySet()},
     * {@link #newKeySet(int) newKeySet(int)}.
     *
     * @since 1.8
     */
    public static class KeySetView<K,V> extends CollectionView<K,V,K>
        implements Set<K>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        // Default value installed by add()/addAll(); null means additions
        // are not supported by this view.
        private final V value;
        KeySetView(ConcurrentHashMap<K,V> map, V value) {  // non-public
            super(map);
            this.value = value;
        }

        /**
         * Returns the default mapped value for additions,
         * or {@code null} if additions are not supported.
         *
         * @return the default mapped value for additions, or {@code null}
         * if not supported
         */
        public V getMappedValue() { return value; }

        /**
         * {@inheritDoc}
         * @throws NullPointerException if the specified key is null
         */
        public boolean contains(Object o) { return map.containsKey(o); }

        /**
         * Removes the key from this map view, by removing the key (and its
         * corresponding value) from the backing map.  This method does
         * nothing if the key is not in the map.
         *
         * @param  o the key to be removed from the backing map
         * @return {@code true} if the backing map contained the specified key
         * @throws NullPointerException if the specified key is null
         */
        public boolean remove(Object o) { return map.remove(o) != null; }

        /**
         * @return an iterator over the keys of the backing map
         */
        public Iterator<K> iterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            // Snapshot the table; a null table (empty map) yields length 0.
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeyIterator<K,V>(t, f, 0, f, m);
        }

        /**
         * Adds the specified key to this set view by mapping the key to
         * the default mapped value in the backing map, if defined.
         *
         * @param e key to be added
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the specified key is null
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean add(K e) {
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            // putVal with onlyIfAbsent=true; null return means a new mapping.
            return map.putVal(e, v, true) == null;
        }

        /**
         * Adds all of the elements in the specified collection to this set,
         * as if by calling {@link #add} on each one.
         *
         * @param c the elements to be inserted into this set
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the collection or any of its
         * elements are {@code null}
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean addAll(Collection<? extends K> c) {
            boolean added = false;
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            for (K e : c) {
                if (map.putVal(e, v, true) == null)
                    added = true;
            }
            return added;
        }

        // Set.hashCode contract: sum of the hash codes of the elements.
        public int hashCode() {
            int h = 0;
            for (K e : this)
                h += e.hashCode();
            return h;
        }

        // Set.equals contract: equal iff the other object is a Set and the
        // two sets contain each other's elements.
        public boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        // Weakly-consistent spliterator over the current table; the size
        // estimate comes from sumCount(), clamped at zero.
        public Spliterator<K> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        // Sequential traversal of the current table, applying the action
        // to each key encountered.
        public void forEach(Consumer<? super K> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(p.key);
            }
        }
    }
4688 dl 1.119
    /**
     * A view of a ConcurrentHashMap as a {@link Collection} of
     * values, in which additions are disabled. This class cannot be
     * directly instantiated. See {@link #values()}.
     */
    static final class ValuesView<K,V> extends CollectionView<K,V,V>
        implements Collection<V>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        ValuesView(ConcurrentHashMap<K,V> map) { super(map); }
        // Membership delegates to a full value scan of the backing map.
        public final boolean contains(Object o) {
            return map.containsValue(o);
        }

        // Removes (at most) one mapping whose value equals o, via the
        // weakly-consistent iterator; returns false for null (values are
        // never null in this map).
        public final boolean remove(Object o) {
            if (o != null) {
                for (Iterator<V> it = iterator(); it.hasNext();) {
                    if (o.equals(it.next())) {
                        it.remove();
                        return true;
                    }
                }
            }
            return false;
        }

        public final Iterator<V> iterator() {
            ConcurrentHashMap<K,V> m = map;
            Node<K,V>[] t;
            // Snapshot the table; a null table (empty map) yields length 0.
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueIterator<K,V>(t, f, 0, f, m);
        }

        // A value has no associated key here, so additions are unsupported.
        public final boolean add(V e) {
            throw new UnsupportedOperationException();
        }
        public final boolean addAll(Collection<? extends V> c) {
            throw new UnsupportedOperationException();
        }

        // Overrides CollectionView.removeAll: the Set-based heuristic there
        // does not apply to values (duplicates possible; remove(Object) only
        // drops one match), so always iterate this view and remove matches.
        @Override public boolean removeAll(Collection<?> c) {
            if (c == null) throw new NullPointerException();
            boolean modified = false;
            for (Iterator<V> it = iterator(); it.hasNext();) {
                if (c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

        // Bulk filtered removal implemented by the backing map.
        public boolean removeIf(Predicate<? super V> filter) {
            return map.removeValueIf(filter);
        }

        // Weakly-consistent spliterator over the current table; the size
        // estimate comes from sumCount(), clamped at zero.
        public Spliterator<V> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        // Sequential traversal of the current table, applying the action
        // to each value encountered.
        public void forEach(Consumer<? super V> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(p.val);
            }
        }
    }
4762    
4763     /**
4764     * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
4765     * entries. This class cannot be directly instantiated. See
4766     * {@link #entrySet()}.
4767     */
    static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
        implements Set<Map.Entry<K,V>>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }

        /**
         * Returns true iff {@code o} is a Map.Entry whose (non-null) key
         * currently maps to a value equal to the entry's (non-null) value.
         * Uses assignment-in-condition chaining so each step short-circuits.
         */
        public boolean contains(Object o) {
            Object k, v, r; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (r = map.get(k)) != null &&
                    (v = e.getValue()) != null &&
                    (v == r || v.equals(r)));
        }

        /**
         * Removes the entry's key only if it is currently mapped to the
         * entry's value (atomic two-argument remove on the backing map).
         */
        public boolean remove(Object o) {
            Object k, v; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    map.remove(k, v));
        }

        /**
         * @return an iterator over the entries of the backing map
         */
        public Iterator<Map.Entry<K,V>> iterator() {
            ConcurrentHashMap<K,V> m = map;
            Node<K,V>[] t;
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntryIterator<K,V>(t, f, 0, f, m);
        }

        // Inserts the entry's mapping; true iff the key was previously absent.
        public boolean add(Entry<K,V> e) {
            return map.putVal(e.getKey(), e.getValue(), false) == null;
        }

        // True iff at least one entry of c was newly inserted.
        public boolean addAll(Collection<? extends Entry<K,V>> c) {
            boolean added = false;
            for (Entry<K,V> e : c) {
                if (add(e))
                    added = true;
            }
            return added;
        }

        // Delegates to the map-level helper that removes matching entries.
        public boolean removeIf(Predicate<? super Entry<K,V>> filter) {
            return map.removeEntryIf(filter);
        }

        // Sum of entry hash codes, as required by the Set/Map.entrySet contract.
        public final int hashCode() {
            int h = 0;
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; ) {
                    h += p.hashCode();
                }
            }
            return h;
        }

        // Standard Set equality: mutual containment (weakly consistent
        // under concurrent modification).
        public final boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        // Spliterator sized by a (possibly stale) sumCount() estimate.
        public Spliterator<Map.Entry<K,V>> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
        }

        // Applies action to each entry; exports write-through MapEntry
        // objects so entry.setValue updates the backing map.
        public void forEach(Consumer<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(new MapEntry<K,V>(p.key, p.val, map));
            }
        }

    }
4855    
4856     // -------------------------------------------------------
4857 dl 1.119
4858 dl 1.210 /**
4859     * Base class for bulk tasks. Repeats some fields and code from
4860     * class Traverser, because we need to subclass CountedCompleter.
4861     */
    /**
     * Base class for bulk tasks. Repeats some fields and code from
     * class Traverser, because we need to subclass CountedCompleter.
     */
    @SuppressWarnings("serial")
    abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
        Node<K,V>[] tab;        // same as Traverser
        Node<K,V> next;         // next node to visit within current bin
        TableStack<K,V> stack, spare;   // saved/reusable traversal state across resizes
        int index;              // current table slot
        int baseIndex;          // starting slot of current stride
        int baseLimit;          // exclusive upper bound of this task's range
        final int baseSize;     // length of the initial table
        int batch;              // split control

        // p is the parent task (null for the root); [i, f) is this task's
        // slot range over table t; b is the remaining split budget.
        BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
            super(par);
            this.batch = b;
            this.index = this.baseIndex = i;
            if ((this.tab = t) == null)
                this.baseSize = this.baseLimit = 0;
            else if (par == null)
                this.baseSize = this.baseLimit = t.length;
            else {
                this.baseLimit = f;
                this.baseSize = par.baseSize;
            }
        }

        /**
         * Same as Traverser version.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;
            for (;;) {
                Node<K,V>[] t; int i, n;
                if (e != null)
                    return next = e;
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                if ((e = tabAt(t, i)) != null && e.hash < 0) {
                    // Negative hash marks a special node.
                    if (e instanceof ForwardingNode) {
                        // Bin moved during resize: descend into the new
                        // table, remembering where we were.
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        pushState(t, i, n);
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first;
                    else
                        e = null;   // reservation node; treat bin as empty
                }
                if (stack != null)
                    recoverState(n);                // possibly pop resize state
                else if ((index = i + baseSize) >= n)
                    index = ++baseIndex;            // visit upper slots if present
            }
        }

        // Saves traversal state upon encountering a forwarding node,
        // reusing a spare TableStack node when one is available.
        private void pushState(Node<K,V>[] t, int i, int n) {
            TableStack<K,V> s = spare;  // reuse if possible
            if (s != null)
                spare = s.next;
            else
                s = new TableStack<K,V>();
            s.tab = t;
            s.length = n;
            s.index = i;
            s.next = stack;
            stack = s;
        }

        // Pops saved state on completing traversal of a forwarded table,
        // recycling popped nodes onto the spare list.
        private void recoverState(int n) {
            TableStack<K,V> s; int len;
            while ((s = stack) != null && (index += (len = s.length)) >= n) {
                n = len;
                index = s.index;
                tab = s.tab;
                s.tab = null;
                TableStack<K,V> next = s.next;
                s.next = spare; // save for reuse
                stack = next;
                spare = s;
            }
            if (s == null && (index += baseSize) >= n)
                index = ++baseIndex;
        }
    }
4949    
4950     /*
4951     * Task classes. Coded in a regular but ugly format/style to
4952     * simplify checks that each variant differs in the right way from
4953 dl 1.149 * others. The null screenings exist because compilers cannot tell
4954     * that we've already null-checked task arguments, so we force
4955     * simplest hoisted bypass to help avoid convoluted traps.
4956 dl 1.119 */
    /**
     * Applies the given action to each key, forking right-half subtasks
     * while the batch budget allows, then processing the local range.
     */
    @SuppressWarnings("serial")
    static final class ForEachKeyTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super K> action;
        ForEachKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super K> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super K> action;
            if ((action = this.action) != null) {
                // Split: each iteration halves the batch and this task's
                // limit (side effects inside the constructor-arg list are
                // intentional) and forks the upper half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachKeyTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept(p.key);
                propagateCompletion();
            }
        }
    }
4983    
    /**
     * Applies the given action to each value; same fork/process structure
     * as ForEachKeyTask but consuming {@code p.val}.
     */
    @SuppressWarnings("serial")
    static final class ForEachValueTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super V> action;
        ForEachValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super V> action;
            if ((action = this.action) != null) {
                // Fork upper halves while splittable (batch/baseLimit are
                // mutated inside the constructor-argument list).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachValueTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept(p.val);
                propagateCompletion();
            }
        }
    }
5010    
    /**
     * Applies the given action to each entry node; same fork/process
     * structure as the other ForEach tasks.
     */
    @SuppressWarnings("serial")
    static final class ForEachEntryTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super Entry<K,V>> action;
        ForEachEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super Entry<K,V>> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super Entry<K,V>> action;
            if ((action = this.action) != null) {
                // Fork upper halves while splittable.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachEntryTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p);   // Node itself implements Map.Entry
                propagateCompletion();
            }
        }
    }
5037    
    /**
     * Applies the given (key, value) action to each mapping; same
     * fork/process structure as the other ForEach tasks.
     */
    @SuppressWarnings("serial")
    static final class ForEachMappingTask<K,V>
        extends BulkTask<K,V,Void> {
        final BiConsumer<? super K, ? super V> action;
        ForEachMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiConsumer<? super K,? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final BiConsumer<? super K, ? super V> action;
            if ((action = this.action) != null) {
                // Fork upper halves while splittable.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachMappingTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p.key, p.val);
                propagateCompletion();
            }
        }
    }
5064    
    /**
     * Applies a transformation to each key and passes each non-null
     * result to the action; null transformer results are skipped.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedKeyTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super K, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork upper halves while splittable.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedKeyTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5097    
    /**
     * Applies a transformation to each value and passes each non-null
     * result to the action; null transformer results are skipped.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedValueTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork upper halves while splittable.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedValueTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5130    
    /**
     * Applies a transformation to each entry and passes each non-null
     * result to the action; null transformer results are skipped.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedEntryTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork upper halves while splittable.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedEntryTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5163    
    /**
     * Applies a (key, value) transformation to each mapping and passes
     * each non-null result to the action; null results are skipped.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedMappingTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork upper halves while splittable.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedMappingTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5197    
    /**
     * Searches keys for a non-null mapping of the search function; the
     * first success is CAS-published into the shared result and the root
     * is completed, short-circuiting sibling tasks.
     */
    @SuppressWarnings("serial")
    static final class SearchKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> searchFunction;
        final AtomicReference<U> result;    // shared across all subtasks
        SearchKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super K, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)   // someone already succeeded
                        return;
                    addToPendingCount(1);
                    new SearchKeysTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan local range, re-checking result so we stop early.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5241    
    /**
     * Searches values for a non-null mapping of the search function;
     * same short-circuiting structure as SearchKeysTask.
     */
    @SuppressWarnings("serial")
    static final class SearchValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;    // shared across all subtasks
        SearchValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)   // already found elsewhere
                        return;
                    addToPendingCount(1);
                    new SearchValuesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.val)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5285 tim 1.11
    /**
     * Searches entries for a non-null mapping of the search function;
     * same short-circuiting structure as SearchKeysTask.
     */
    @SuppressWarnings("serial")
    static final class SearchEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result;    // shared across all subtasks
        SearchEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Entry<K,V>, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<Entry<K,V>, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)   // already found elsewhere
                        return;
                    addToPendingCount(1);
                    new SearchEntriesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        return;
                    }
                }
            }
        }
    }
5329 tim 1.1
    /**
     * Searches (key, value) mappings for a non-null result of the search
     * function; same short-circuiting structure as SearchKeysTask.
     */
    @SuppressWarnings("serial")
    static final class SearchMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;    // shared across all subtasks
        SearchMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)   // already found elsewhere
                        return;
                    addToPendingCount(1);
                    new SearchMappingsTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key, p.val)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5373 tim 1.1
    /**
     * Reduces keys using the given reducer. Forked right-hand subtasks
     * are chained via rights/nextRight; on completion each task folds
     * its completed right siblings' results into its own.
     */
    @SuppressWarnings("serial")
    static final class ReduceKeysTask<K,V>
        extends BulkTask<K,V,K> {
        final BiFunction<? super K, ? super K, ? extends K> reducer;
        K result;
        ReduceKeysTask<K,V> rights, nextRight;  // linked list of forked subtasks
        ReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceKeysTask<K,V> nextRight,
             BiFunction<? super K, ? super K, ? extends K> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final K getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super K, ? extends K> reducer;
            if ((reducer = this.reducer) != null) {
                // Fork upper halves while splittable, chaining them on rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceKeysTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Locally reduce this task's range.
                K r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    K u = p.key;
                    r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
                }
                result = r;
                // Fold in results of completed right-hand subtasks.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceKeysTask<K,V>
                        t = (ReduceKeysTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        K tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5421 tim 1.1
    /**
     * Reduces values using the given reducer; same fork/fold structure
     * as ReduceKeysTask.
     */
    @SuppressWarnings("serial")
    static final class ReduceValuesTask<K,V>
        extends BulkTask<K,V,V> {
        final BiFunction<? super V, ? super V, ? extends V> reducer;
        V result;
        ReduceValuesTask<K,V> rights, nextRight;    // linked list of forked subtasks
        ReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceValuesTask<K,V> nextRight,
             BiFunction<? super V, ? super V, ? extends V> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final V getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super V, ? super V, ? extends V> reducer;
            if ((reducer = this.reducer) != null) {
                // Fork upper halves while splittable, chaining them on rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceValuesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Locally reduce this task's range.
                V r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    V v = p.val;
                    r = (r == null) ? v : reducer.apply(r, v);
                }
                result = r;
                // Fold in results of completed right-hand subtasks.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceValuesTask<K,V>
                        t = (ReduceValuesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        V tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5469 tim 1.1
    /**
     * Reduces entries using the given reducer; same fork/fold structure
     * as ReduceKeysTask.
     */
    @SuppressWarnings("serial")
    static final class ReduceEntriesTask<K,V>
        extends BulkTask<K,V,Map.Entry<K,V>> {
        final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
        Map.Entry<K,V> result;
        ReduceEntriesTask<K,V> rights, nextRight;   // linked list of forked subtasks
        ReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceEntriesTask<K,V> nextRight,
             BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final Map.Entry<K,V> getRawResult() { return result; }
        public final void compute() {
            final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
            if ((reducer = this.reducer) != null) {
                // Fork upper halves while splittable, chaining them on rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceEntriesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Locally reduce this task's range.
                Map.Entry<K,V> r = null;
                for (Node<K,V> p; (p = advance()) != null; )
                    r = (r == null) ? p : reducer.apply(r, p);
                result = r;
                // Fold in results of completed right-hand subtasks.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceEntriesTask<K,V>
                        t = (ReduceEntriesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        Map.Entry<K,V> tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5515 dl 1.99
    /**
     * Maps each key through the transformer (skipping null results) and
     * reduces the outputs; same fork/fold structure as ReduceKeysTask.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceKeysTask<K,V,U> rights, nextRight; // linked list of forked subtasks
        MapReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysTask<K,V,U> nextRight,
             Function<? super K, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Fork upper halves while splittable, chaining them on rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Locally map and reduce this task's range.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Fold in results of completed right-hand subtasks.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysTask<K,V,U>
                        t = (MapReduceKeysTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5569    
    /**
     * Maps each value through the transformer (skipping null results) and
     * reduces the outputs; same fork/fold structure as ReduceKeysTask.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceValuesTask<K,V,U> rights, nextRight;   // linked list of forked subtasks
        MapReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesTask<K,V,U> nextRight,
             Function<? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Fork upper halves while splittable, chaining them on rights.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Locally map and reduce this task's range.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Fold in results of completed right-hand subtasks.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesTask<K,V,U>
                        t = (MapReduceValuesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5623    
5624 dl 1.222 @SuppressWarnings("serial")
5625 dl 1.210 static final class MapReduceEntriesTask<K,V,U>
5626     extends BulkTask<K,V,U> {
5627 dl 1.153 final Function<Map.Entry<K,V>, ? extends U> transformer;
5628     final BiFunction<? super U, ? super U, ? extends U> reducer;
5629 dl 1.119 U result;
5630 dl 1.128 MapReduceEntriesTask<K,V,U> rights, nextRight;
5631 dl 1.119 MapReduceEntriesTask
5632 dl 1.210 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5633 dl 1.128 MapReduceEntriesTask<K,V,U> nextRight,
5634 dl 1.153 Function<Map.Entry<K,V>, ? extends U> transformer,
5635     BiFunction<? super U, ? super U, ? extends U> reducer) {
5636 dl 1.210 super(p, b, i, f, t); this.nextRight = nextRight;
5637 dl 1.119 this.transformer = transformer;
5638     this.reducer = reducer;
5639     }
5640 dl 1.146 public final U getRawResult() { return result; }
5641 dl 1.210 public final void compute() {
5642 dl 1.153 final Function<Map.Entry<K,V>, ? extends U> transformer;
5643     final BiFunction<? super U, ? super U, ? extends U> reducer;
5644 dl 1.149 if ((transformer = this.transformer) != null &&
5645     (reducer = this.reducer) != null) {
5646 dl 1.210 for (int i = baseIndex, f, h; batch > 0 &&
5647     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5648     addToPendingCount(1);
5649 dl 1.149 (rights = new MapReduceEntriesTask<K,V,U>
5650 dl 1.210 (this, batch >>>= 1, baseLimit = h, f, tab,
5651     rights, transformer, reducer)).fork();
5652     }
5653     U r = null;
5654     for (Node<K,V> p; (p = advance()) != null; ) {
5655     U u;
5656     if ((u = transformer.apply(p)) != null)
5657 dl 1.149 r = (r == null) ? u : reducer.apply(r, u);
5658     }
5659     result = r;
5660     CountedCompleter<?> c;
5661     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5662 dl 1.246 @SuppressWarnings("unchecked")
5663     MapReduceEntriesTask<K,V,U>
5664 dl 1.149 t = (MapReduceEntriesTask<K,V,U>)c,
5665     s = t.rights;
5666     while (s != null) {
5667     U tr, sr;
5668     if ((sr = s.result) != null)
5669     t.result = (((tr = t.result) == null) ? sr :
5670     reducer.apply(tr, sr));
5671     s = t.rights = s.nextRight;
5672     }
5673 dl 1.119 }
5674 dl 1.138 }
5675 dl 1.119 }
5676 dl 1.4 }
5677 tim 1.1
5678 dl 1.222 @SuppressWarnings("serial")
5679 dl 1.210 static final class MapReduceMappingsTask<K,V,U>
5680     extends BulkTask<K,V,U> {
5681 dl 1.153 final BiFunction<? super K, ? super V, ? extends U> transformer;
5682     final BiFunction<? super U, ? super U, ? extends U> reducer;
5683 dl 1.119 U result;
5684 dl 1.128 MapReduceMappingsTask<K,V,U> rights, nextRight;
5685 dl 1.119 MapReduceMappingsTask
5686 dl 1.210 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5687 dl 1.128 MapReduceMappingsTask<K,V,U> nextRight,
5688 dl 1.153 BiFunction<? super K, ? super V, ? extends U> transformer,
5689     BiFunction<? super U, ? super U, ? extends U> reducer) {
5690 dl 1.210 super(p, b, i, f, t); this.nextRight = nextRight;
5691 dl 1.119 this.transformer = transformer;
5692     this.reducer = reducer;
5693     }
5694 dl 1.146 public final U getRawResult() { return result; }
5695 dl 1.210 public final void compute() {
5696 dl 1.153 final BiFunction<? super K, ? super V, ? extends U> transformer;
5697     final BiFunction<? super U, ? super U, ? extends U> reducer;
5698 dl 1.149 if ((transformer = this.transformer) != null &&
5699     (reducer = this.reducer) != null) {
5700 dl 1.210 for (int i = baseIndex, f, h; batch > 0 &&
5701     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5702     addToPendingCount(1);
5703 dl 1.149 (rights = new MapReduceMappingsTask<K,V,U>
5704 dl 1.210 (this, batch >>>= 1, baseLimit = h, f, tab,
5705     rights, transformer, reducer)).fork();
5706     }
5707     U r = null;
5708     for (Node<K,V> p; (p = advance()) != null; ) {
5709     U u;
5710 dl 1.222 if ((u = transformer.apply(p.key, p.val)) != null)
5711 dl 1.149 r = (r == null) ? u : reducer.apply(r, u);
5712     }
5713     result = r;
5714     CountedCompleter<?> c;
5715     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5716 dl 1.246 @SuppressWarnings("unchecked")
5717     MapReduceMappingsTask<K,V,U>
5718 dl 1.149 t = (MapReduceMappingsTask<K,V,U>)c,
5719     s = t.rights;
5720     while (s != null) {
5721     U tr, sr;
5722     if ((sr = s.result) != null)
5723     t.result = (((tr = t.result) == null) ? sr :
5724     reducer.apply(tr, sr));
5725     s = t.rights = s.nextRight;
5726     }
5727 dl 1.119 }
5728     }
5729     }
5730     }
5731 jsr166 1.114
5732 dl 1.222 @SuppressWarnings("serial")
5733 dl 1.210 static final class MapReduceKeysToDoubleTask<K,V>
5734     extends BulkTask<K,V,Double> {
5735 dl 1.171 final ToDoubleFunction<? super K> transformer;
5736 dl 1.153 final DoubleBinaryOperator reducer;
5737 dl 1.119 final double basis;
5738     double result;
5739 dl 1.128 MapReduceKeysToDoubleTask<K,V> rights, nextRight;
5740 dl 1.119 MapReduceKeysToDoubleTask
5741 dl 1.210 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5742 dl 1.128 MapReduceKeysToDoubleTask<K,V> nextRight,
5743 dl 1.171 ToDoubleFunction<? super K> transformer,
5744 dl 1.119 double basis,
5745 dl 1.153 DoubleBinaryOperator reducer) {
5746 dl 1.210 super(p, b, i, f, t); this.nextRight = nextRight;
5747 dl 1.119 this.transformer = transformer;
5748     this.basis = basis; this.reducer = reducer;
5749     }
5750 dl 1.146 public final Double getRawResult() { return result; }
5751 dl 1.210 public final void compute() {
5752 dl 1.171 final ToDoubleFunction<? super K> transformer;
5753 dl 1.153 final DoubleBinaryOperator reducer;
5754 dl 1.149 if ((transformer = this.transformer) != null &&
5755     (reducer = this.reducer) != null) {
5756     double r = this.basis;
5757 dl 1.210 for (int i = baseIndex, f, h; batch > 0 &&
5758     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5759     addToPendingCount(1);
5760 dl 1.149 (rights = new MapReduceKeysToDoubleTask<K,V>
5761 dl 1.210 (this, batch >>>= 1, baseLimit = h, f, tab,
5762     rights, transformer, r, reducer)).fork();
5763     }
5764     for (Node<K,V> p; (p = advance()) != null; )
5765 dl 1.222 r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key));
5766 dl 1.149 result = r;
5767     CountedCompleter<?> c;
5768     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5769 dl 1.246 @SuppressWarnings("unchecked")
5770     MapReduceKeysToDoubleTask<K,V>
5771 dl 1.149 t = (MapReduceKeysToDoubleTask<K,V>)c,
5772     s = t.rights;
5773     while (s != null) {
5774 dl 1.153 t.result = reducer.applyAsDouble(t.result, s.result);
5775 dl 1.149 s = t.rights = s.nextRight;
5776     }
5777 dl 1.119 }
5778 dl 1.138 }
5779 dl 1.79 }
5780 dl 1.119 }
5781 dl 1.79
5782 dl 1.222 @SuppressWarnings("serial")
5783 dl 1.210 static final class MapReduceValuesToDoubleTask<K,V>
5784     extends BulkTask<K,V,Double> {
5785 dl 1.171 final ToDoubleFunction<? super V> transformer;
5786 dl 1.153 final DoubleBinaryOperator reducer;
5787 dl 1.119 final double basis;
5788     double result;
5789 dl 1.128 MapReduceValuesToDoubleTask<K,V> rights, nextRight;
5790 dl 1.119 MapReduceValuesToDoubleTask
5791 dl 1.210 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5792 dl 1.128 MapReduceValuesToDoubleTask<K,V> nextRight,
5793 dl 1.171 ToDoubleFunction<? super V> transformer,
5794 dl 1.119 double basis,
5795 dl 1.153 DoubleBinaryOperator reducer) {
5796 dl 1.210 super(p, b, i, f, t); this.nextRight = nextRight;
5797 dl 1.119 this.transformer = transformer;
5798     this.basis = basis; this.reducer = reducer;
5799     }
5800 dl 1.146 public final Double getRawResult() { return result; }
5801 dl 1.210 public final void compute() {
5802 dl 1.171 final ToDoubleFunction<? super V> transformer;
5803 dl 1.153 final DoubleBinaryOperator reducer;
5804 dl 1.149 if ((transformer = this.transformer) != null &&
5805     (reducer = this.reducer) != null) {
5806     double r = this.basis;
5807 dl 1.210 for (int i = baseIndex, f, h; batch > 0 &&
5808     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5809     addToPendingCount(1);
5810 dl 1.149 (rights = new MapReduceValuesToDoubleTask<K,V>
5811 dl 1.210 (this, batch >>>= 1, baseLimit = h, f, tab,
5812     rights, transformer, r, reducer)).fork();
5813     }
5814     for (Node<K,V> p; (p = advance()) != null; )
5815     r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val));
5816 dl 1.149 result = r;
5817     CountedCompleter<?> c;
5818     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5819 dl 1.246 @SuppressWarnings("unchecked")
5820     MapReduceValuesToDoubleTask<K,V>
5821 dl 1.149 t = (MapReduceValuesToDoubleTask<K,V>)c,
5822     s = t.rights;
5823     while (s != null) {
5824 dl 1.153 t.result = reducer.applyAsDouble(t.result, s.result);
5825 dl 1.149 s = t.rights = s.nextRight;
5826     }
5827 dl 1.119 }
5828     }
5829 dl 1.30 }
5830 dl 1.79 }
5831 dl 1.30
5832 dl 1.222 @SuppressWarnings("serial")
5833 dl 1.210 static final class MapReduceEntriesToDoubleTask<K,V>
5834     extends BulkTask<K,V,Double> {
5835 dl 1.171 final ToDoubleFunction<Map.Entry<K,V>> transformer;
5836 dl 1.153 final DoubleBinaryOperator reducer;
5837 dl 1.119 final double basis;
5838     double result;
5839 dl 1.128 MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
5840 dl 1.119 MapReduceEntriesToDoubleTask
5841 dl 1.210 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5842 dl 1.128 MapReduceEntriesToDoubleTask<K,V> nextRight,
5843 dl 1.171 ToDoubleFunction<Map.Entry<K,V>> transformer,
5844 dl 1.119 double basis,
5845 dl 1.153 DoubleBinaryOperator reducer) {
5846 dl 1.210 super(p, b, i, f, t); this.nextRight = nextRight;
5847 dl 1.119 this.transformer = transformer;
5848     this.basis = basis; this.reducer = reducer;
5849     }
5850 dl 1.146 public final Double getRawResult() { return result; }
5851 dl 1.210 public final void compute() {
5852 dl 1.171 final ToDoubleFunction<Map.Entry<K,V>> transformer;
5853 dl 1.153 final DoubleBinaryOperator reducer;
5854 dl 1.149 if ((transformer = this.transformer) != null &&
5855     (reducer = this.reducer) != null) {
5856     double r = this.basis;
5857 dl 1.210 for (int i = baseIndex, f, h; batch > 0 &&
5858     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5859     addToPendingCount(1);
5860 dl 1.149 (rights = new MapReduceEntriesToDoubleTask<K,V>
5861 dl 1.210 (this, batch >>>= 1, baseLimit = h, f, tab,
5862     rights, transformer, r, reducer)).fork();
5863     }
5864     for (Node<K,V> p; (p = advance()) != null; )
5865     r = reducer.applyAsDouble(r, transformer.applyAsDouble(p));
5866 dl 1.149 result = r;
5867     CountedCompleter<?> c;
5868     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5869 dl 1.246 @SuppressWarnings("unchecked")
5870     MapReduceEntriesToDoubleTask<K,V>
5871 dl 1.149 t = (MapReduceEntriesToDoubleTask<K,V>)c,
5872     s = t.rights;
5873     while (s != null) {
5874 dl 1.153 t.result = reducer.applyAsDouble(t.result, s.result);
5875 dl 1.149 s = t.rights = s.nextRight;
5876     }
5877 dl 1.119 }
5878 dl 1.138 }
5879 dl 1.30 }
5880 tim 1.1 }
5881    
5882 dl 1.222 @SuppressWarnings("serial")
5883 dl 1.210 static final class MapReduceMappingsToDoubleTask<K,V>
5884     extends BulkTask<K,V,Double> {
5885 dl 1.171 final ToDoubleBiFunction<? super K, ? super V> transformer;
5886 dl 1.153 final DoubleBinaryOperator reducer;
5887 dl 1.119 final double basis;
5888     double result;
5889 dl 1.128 MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
5890 dl 1.119 MapReduceMappingsToDoubleTask
5891 dl 1.210 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5892 dl 1.128 MapReduceMappingsToDoubleTask<K,V> nextRight,
5893 dl 1.171 ToDoubleBiFunction<? super K, ? super V> transformer,
5894 dl 1.119 double basis,
5895 dl 1.153 DoubleBinaryOperator reducer) {
5896 dl 1.210 super(p, b, i, f, t); this.nextRight = nextRight;
5897 dl 1.119 this.transformer = transformer;
5898     this.basis = basis; this.reducer = reducer;
5899     }
5900 dl 1.146 public final Double getRawResult() { return result; }
5901 dl 1.210 public final void compute() {
5902 dl 1.171 final ToDoubleBiFunction<? super K, ? super V> transformer;
5903 dl 1.153 final DoubleBinaryOperator reducer;
5904 dl 1.149 if ((transformer = this.transformer) != null &&
5905     (reducer = this.reducer) != null) {
5906     double r = this.basis;
5907 dl 1.210 for (int i = baseIndex, f, h; batch > 0 &&
5908     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5909     addToPendingCount(1);
5910 dl 1.149 (rights = new MapReduceMappingsToDoubleTask<K,V>
5911 dl 1.210 (this, batch >>>= 1, baseLimit = h, f, tab,
5912     rights, transformer, r, reducer)).fork();
5913     }
5914     for (Node<K,V> p; (p = advance()) != null; )
5915 dl 1.222 r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val));
5916 dl 1.149 result = r;
5917     CountedCompleter<?> c;
5918     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5919 dl 1.246 @SuppressWarnings("unchecked")
5920     MapReduceMappingsToDoubleTask<K,V>
5921 dl 1.149 t = (MapReduceMappingsToDoubleTask<K,V>)c,
5922     s = t.rights;
5923     while (s != null) {
5924 dl 1.153 t.result = reducer.applyAsDouble(t.result, s.result);
5925 dl 1.149 s = t.rights = s.nextRight;
5926     }
5927 dl 1.119 }
5928     }
5929 dl 1.4 }
5930 dl 1.119 }
5931    
5932 dl 1.222 @SuppressWarnings("serial")
5933 dl 1.210 static final class MapReduceKeysToLongTask<K,V>
5934     extends BulkTask<K,V,Long> {
5935 dl 1.171 final ToLongFunction<? super K> transformer;
5936 dl 1.153 final LongBinaryOperator reducer;
5937 dl 1.119 final long basis;
5938     long result;
5939 dl 1.128 MapReduceKeysToLongTask<K,V> rights, nextRight;
5940 dl 1.119 MapReduceKeysToLongTask
5941 dl 1.210 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5942 dl 1.128 MapReduceKeysToLongTask<K,V> nextRight,
5943 dl 1.171 ToLongFunction<? super K> transformer,
5944 dl 1.119 long basis,
5945 dl 1.153 LongBinaryOperator reducer) {
5946 dl 1.210 super(p, b, i, f, t); this.nextRight = nextRight;
5947 dl 1.119 this.transformer = transformer;
5948     this.basis = basis; this.reducer = reducer;
5949     }
5950 dl 1.146 public final Long getRawResult() { return result; }
5951 dl 1.210 public final void compute() {
5952 dl 1.171 final ToLongFunction<? super K> transformer;
5953 dl 1.153 final LongBinaryOperator reducer;
5954 dl 1.149 if ((transformer = this.transformer) != null &&
5955     (reducer = this.reducer) != null) {
5956     long r = this.basis;
5957 dl 1.210 for (int i = baseIndex, f, h; batch > 0 &&
5958     (h = ((f = baseLimit) + i) >>> 1) > i;) {
5959     addToPendingCount(1);
5960 dl 1.149 (rights = new MapReduceKeysToLongTask<K,V>
5961 dl 1.210 (this, batch >>>= 1, baseLimit = h, f, tab,
5962     rights, transformer, r, reducer)).fork();
5963     }
5964     for (Node<K,V> p; (p = advance()) != null; )
5965 dl 1.222 r = reducer.applyAsLong(r, transformer.applyAsLong(p.key));
5966 dl 1.149 result = r;
5967     CountedCompleter<?> c;
5968     for (c = firstComplete(); c != null; c = c.nextComplete()) {
5969 dl 1.246 @SuppressWarnings("unchecked")
5970     MapReduceKeysToLongTask<K,V>
5971 dl 1.149 t = (MapReduceKeysToLongTask<K,V>)c,
5972     s = t.rights;
5973     while (s != null) {
5974 dl 1.153 t.result = reducer.applyAsLong(t.result, s.result);
5975 dl 1.149 s = t.rights = s.nextRight;
5976     }
5977 dl 1.119 }
5978 dl 1.138 }
5979 dl 1.4 }
5980 dl 1.119 }
5981    
5982 dl 1.222 @SuppressWarnings("serial")
5983 dl 1.210 static final class MapReduceValuesToLongTask<K,V>
5984     extends BulkTask<K,V,Long> {
5985 dl 1.171 final ToLongFunction<? super V> transformer;
5986 dl 1.153 final LongBinaryOperator reducer;
5987 dl 1.119 final long basis;
5988     long result;
5989 dl 1.128 MapReduceValuesToLongTask<K,V> rights, nextRight;
5990 dl 1.119 MapReduceValuesToLongTask
5991 dl 1.210 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5992 dl 1.128 MapReduceValuesToLongTask<K,V> nextRight,
5993 dl 1.171 ToLongFunction<? super V> transformer,
5994 dl 1.119 long basis,
5995 dl 1.153 LongBinaryOperator reducer) {
5996 dl 1.210 super(p, b, i, f, t); this.nextRight = nextRight;
5997 dl 1.119 this.transformer = transformer;
5998     this.basis = basis; this.reducer = reducer;
5999     }
6000 dl 1.146 public final Long getRawResult() { return result; }
6001 dl 1.210 public final void compute() {
6002 dl 1.171 final ToLongFunction<? super V> transformer;
6003 dl 1.153 final LongBinaryOperator reducer;
6004 dl 1.149 if ((transformer = this.transformer) != null &&
6005     (reducer = this.reducer) != null) {
6006     long r = this.basis;
6007 dl 1.210 for (int i = baseIndex, f, h; batch > 0 &&
6008     (h = ((f = baseLimit) + i) >>> 1) > i;) {
6009     addToPendingCount(1);
6010 dl 1.149 (rights = new MapReduceValuesToLongTask<K,V>
6011 dl 1.210 (this, batch >>>= 1, baseLimit = h, f, tab,
6012     rights, transformer, r, reducer)).fork();
6013     }
6014     for (Node<K,V> p; (p = advance()) != null; )
6015     r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
6016 dl 1.149 result = r;
6017     CountedCompleter<?> c;
6018     for (c = firstComplete(); c != null; c = c.nextComplete()) {
6019 dl 1.246 @SuppressWarnings("unchecked")
6020     MapReduceValuesToLongTask<K,V>
6021 dl 1.149 t = (MapReduceValuesToLongTask<K,V>)c,
6022     s = t.rights;
6023     while (s != null) {
6024 dl 1.153 t.result = reducer.applyAsLong(t.result, s.result);
6025 dl 1.149 s = t.rights = s.nextRight;
6026     }
6027 dl 1.119 }
6028     }
6029 jsr166 1.95 }
6030 dl 1.119 }
6031    
6032 dl 1.222 @SuppressWarnings("serial")
6033 dl 1.210 static final class MapReduceEntriesToLongTask<K,V>
6034     extends BulkTask<K,V,Long> {
6035 dl 1.171 final ToLongFunction<Map.Entry<K,V>> transformer;
6036 dl 1.153 final LongBinaryOperator reducer;
6037 dl 1.119 final long basis;
6038     long result;
6039 dl 1.128 MapReduceEntriesToLongTask<K,V> rights, nextRight;
6040 dl 1.119 MapReduceEntriesToLongTask
6041 dl 1.210 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6042 dl 1.128 MapReduceEntriesToLongTask<K,V> nextRight,
6043 dl 1.171 ToLongFunction<Map.Entry<K,V>> transformer,
6044 dl 1.119 long basis,
6045 dl 1.153 LongBinaryOperator reducer) {
6046 dl 1.210 super(p, b, i, f, t); this.nextRight = nextRight;
6047 dl 1.119 this.transformer = transformer;
6048     this.basis = basis; this.reducer = reducer;
6049     }
6050 dl 1.146 public final Long getRawResult() { return result; }
6051 dl 1.210 public final void compute() {
6052 dl 1.171 final ToLongFunction<Map.Entry<K,V>> transformer;
6053 dl 1.153 final LongBinaryOperator reducer;
6054 dl 1.149 if ((transformer = this.transformer) != null &&
6055     (reducer = this.reducer) != null) {
6056     long r = this.basis;
6057 dl 1.210 for (int i = baseIndex, f, h; batch > 0 &&
6058     (h = ((f = baseLimit) + i) >>> 1) > i;) {
6059     addToPendingCount(1);
6060 dl 1.149 (rights = new MapReduceEntriesToLongTask<K,V>
6061 dl 1.210 (this, batch >>>= 1, baseLimit = h, f, tab,
6062     rights, transformer, r, reducer)).fork();
6063     }
6064     for (Node<K,V> p; (p = advance()) != null; )
6065     r = reducer.applyAsLong(r, transformer.applyAsLong(p));
6066 dl 1.149 result = r;
6067     CountedCompleter<?> c;
6068     for (c = firstComplete(); c != null; c = c.nextComplete()) {
6069 dl 1.246 @SuppressWarnings("unchecked")
6070     MapReduceEntriesToLongTask<K,V>
6071 dl 1.149 t = (MapReduceEntriesToLongTask<K,V>)c,
6072     s = t.rights;
6073     while (s != null) {
6074 dl 1.153 t.result = reducer.applyAsLong(t.result, s.result);
6075 dl 1.149 s = t.rights = s.nextRight;
6076     }
6077 dl 1.119 }
6078 dl 1.138 }
6079 dl 1.4 }
6080 tim 1.1 }
6081    
6082 dl 1.222 @SuppressWarnings("serial")
6083 dl 1.210 static final class MapReduceMappingsToLongTask<K,V>
6084     extends BulkTask<K,V,Long> {
6085 dl 1.171 final ToLongBiFunction<? super K, ? super V> transformer;
6086 dl 1.153 final LongBinaryOperator reducer;
6087 dl 1.119 final long basis;
6088     long result;
6089 dl 1.128 MapReduceMappingsToLongTask<K,V> rights, nextRight;
6090 dl 1.119 MapReduceMappingsToLongTask
6091 dl 1.210 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6092 dl 1.128 MapReduceMappingsToLongTask<K,V> nextRight,
6093 dl 1.171 ToLongBiFunction<? super K, ? super V> transformer,
6094 dl 1.119 long basis,
6095 dl 1.153 LongBinaryOperator reducer) {
6096 dl 1.210 super(p, b, i, f, t); this.nextRight = nextRight;
6097 dl 1.119 this.transformer = transformer;
6098     this.basis = basis; this.reducer = reducer;
6099     }
6100 dl 1.146 public final Long getRawResult() { return result; }
6101 dl 1.210 public final void compute() {
6102 dl 1.171 final ToLongBiFunction<? super K, ? super V> transformer;
6103 dl 1.153 final LongBinaryOperator reducer;
6104 dl 1.149 if ((transformer = this.transformer) != null &&
6105     (reducer = this.reducer) != null) {
6106     long r = this.basis;
6107 dl 1.210 for (int i = baseIndex, f, h; batch > 0 &&
6108     (h = ((f = baseLimit) + i) >>> 1) > i;) {
6109     addToPendingCount(1);
6110 dl 1.149 (rights = new MapReduceMappingsToLongTask<K,V>
6111 dl 1.210 (this, batch >>>= 1, baseLimit = h, f, tab,
6112     rights, transformer, r, reducer)).fork();
6113     }
6114     for (Node<K,V> p; (p = advance()) != null; )
6115 dl 1.222 r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val));
6116 dl 1.149 result = r;
6117     CountedCompleter<?> c;
6118     for (c = firstComplete(); c != null; c = c.nextComplete()) {
6119 dl 1.246 @SuppressWarnings("unchecked")
6120     MapReduceMappingsToLongTask<K,V>
6121 dl 1.149 t = (MapReduceMappingsToLongTask<K,V>)c,
6122     s = t.rights;
6123     while (s != null) {
6124 dl 1.153 t.result = reducer.applyAsLong(t.result, s.result);
6125 dl 1.149 s = t.rights = s.nextRight;
6126     }
6127 dl 1.119 }
6128     }
6129 dl 1.4 }
6130 tim 1.1 }
6131    
6132 dl 1.222 @SuppressWarnings("serial")
6133 dl 1.210 static final class MapReduceKeysToIntTask<K,V>
6134     extends BulkTask<K,V,Integer> {
6135 dl 1.171 final ToIntFunction<? super K> transformer;
6136 dl 1.153 final IntBinaryOperator reducer;
6137 dl 1.119 final int basis;
6138     int result;
6139 dl 1.128 MapReduceKeysToIntTask<K,V> rights, nextRight;
6140 dl 1.119 MapReduceKeysToIntTask
6141 dl 1.210 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6142 dl 1.128 MapReduceKeysToIntTask<K,V> nextRight,
6143 dl 1.171 ToIntFunction<? super K> transformer,
6144 dl 1.119 int basis,
6145 dl 1.153 IntBinaryOperator reducer) {
6146 dl 1.210 super(p, b, i, f, t); this.nextRight = nextRight;
6147 dl 1.119 this.transformer = transformer;
6148     this.basis = basis; this.reducer = reducer;
6149     }
6150 dl 1.146 public final Integer getRawResult() { return result; }
6151 dl 1.210 public final void compute() {
6152 dl 1.171 final ToIntFunction<? super K> transformer;
6153 dl 1.153 final IntBinaryOperator reducer;
6154 dl 1.149 if ((transformer = this.transformer) != null &&
6155     (reducer = this.reducer) != null) {
6156     int r = this.basis;
6157 dl 1.210 for (int i = baseIndex, f, h; batch > 0 &&
6158     (h = ((f = baseLimit) + i) >>> 1) > i;) {
6159     addToPendingCount(1);
6160 dl 1.149 (rights = new MapReduceKeysToIntTask<K,V>
6161 dl 1.210 (this, batch >>>= 1, baseLimit = h, f, tab,
6162     rights, transformer, r, reducer)).fork();
6163     }
6164     for (Node<K,V> p; (p = advance()) != null; )
6165 dl 1.222 r = reducer.applyAsInt(r, transformer.applyAsInt(p.key));
6166 dl 1.149 result = r;
6167     CountedCompleter<?> c;
6168     for (c = firstComplete(); c != null; c = c.nextComplete()) {
6169 dl 1.246 @SuppressWarnings("unchecked")
6170     MapReduceKeysToIntTask<K,V>
6171 dl 1.149 t = (MapReduceKeysToIntTask<K,V>)c,
6172     s = t.rights;
6173     while (s != null) {
6174 dl 1.153 t.result = reducer.applyAsInt(t.result, s.result);
6175 dl 1.149 s = t.rights = s.nextRight;
6176     }
6177 dl 1.119 }
6178 dl 1.138 }
6179 dl 1.30 }
6180     }
6181    
6182 dl 1.222 @SuppressWarnings("serial")
6183 dl 1.210 static final class MapReduceValuesToIntTask<K,V>
6184     extends BulkTask<K,V,Integer> {
6185 dl 1.171 final ToIntFunction<? super V> transformer;
6186 dl 1.153 final IntBinaryOperator reducer;
6187 dl 1.119 final int basis;
6188     int result;
6189 dl 1.128 MapReduceValuesToIntTask<K,V> rights, nextRight;
6190 dl 1.119 MapReduceValuesToIntTask
6191 dl 1.210 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6192 dl 1.128 MapReduceValuesToIntTask<K,V> nextRight,
6193 dl 1.171 ToIntFunction<? super V> transformer,
6194 dl 1.119 int basis,
6195 dl 1.153 IntBinaryOperator reducer) {
6196 dl 1.210 super(p, b, i, f, t); this.nextRight = nextRight;
6197 dl 1.119 this.transformer = transformer;
6198     this.basis = basis; this.reducer = reducer;
6199     }
6200 dl 1.146 public final Integer getRawResult() { return result; }
6201 dl 1.210 public final void compute() {
6202 dl 1.171 final ToIntFunction<? super V> transformer;
6203 dl 1.153 final IntBinaryOperator reducer;
6204 dl 1.149 if ((transformer = this.transformer) != null &&
6205     (reducer = this.reducer) != null) {
6206     int r = this.basis;
6207 dl 1.210 for (int i = baseIndex, f, h; batch > 0 &&
6208     (h = ((f = baseLimit) + i) >>> 1) > i;) {
6209     addToPendingCount(1);
6210 dl 1.149 (rights = new MapReduceValuesToIntTask<K,V>
6211 dl 1.210 (this, batch >>>= 1, baseLimit = h, f, tab,
6212     rights, transformer, r, reducer)).fork();
6213     }
6214     for (Node<K,V> p; (p = advance()) != null; )
6215     r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
6216 dl 1.149 result = r;
6217     CountedCompleter<?> c;
6218     for (c = firstComplete(); c != null; c = c.nextComplete()) {
6219 dl 1.246 @SuppressWarnings("unchecked")
6220     MapReduceValuesToIntTask<K,V>
6221 dl 1.149 t = (MapReduceValuesToIntTask<K,V>)c,
6222     s = t.rights;
6223     while (s != null) {
6224 dl 1.153 t.result = reducer.applyAsInt(t.result, s.result);
6225 dl 1.149 s = t.rights = s.nextRight;
6226     }
6227 dl 1.119 }
6228 dl 1.2 }
6229 tim 1.1 }
6230     }
6231    
6232 dl 1.222 @SuppressWarnings("serial")
6233 dl 1.210 static final class MapReduceEntriesToIntTask<K,V>
6234     extends BulkTask<K,V,Integer> {
6235 dl 1.171 final ToIntFunction<Map.Entry<K,V>> transformer;
6236 dl 1.153 final IntBinaryOperator reducer;
6237 dl 1.119 final int basis;
6238     int result;
6239 dl 1.128 MapReduceEntriesToIntTask<K,V> rights, nextRight;
6240 dl 1.119 MapReduceEntriesToIntTask
6241 dl 1.210 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6242 dl 1.128 MapReduceEntriesToIntTask<K,V> nextRight,
6243 dl 1.171 ToIntFunction<Map.Entry<K,V>> transformer,
6244 dl 1.119 int basis,
6245 dl 1.153 IntBinaryOperator reducer) {
6246 dl 1.210 super(p, b, i, f, t); this.nextRight = nextRight;
6247 dl 1.119 this.transformer = transformer;
6248     this.basis = basis; this.reducer = reducer;
6249     }
6250 dl 1.146 public final Integer getRawResult() { return result; }
6251 dl 1.210 public final void compute() {
6252 dl 1.171 final ToIntFunction<Map.Entry<K,V>> transformer;
6253 dl 1.153 final IntBinaryOperator reducer;
6254 dl 1.149 if ((transformer = this.transformer) != null &&
6255     (reducer = this.reducer) != null) {
6256     int r = this.basis;
6257 dl 1.210 for (int i = baseIndex, f, h; batch > 0 &&
6258     (h = ((f = baseLimit) + i) >>> 1) > i;) {
6259     addToPendingCount(1);
6260 dl 1.149 (rights = new MapReduceEntriesToIntTask<K,V>
6261 dl 1.210 (this, batch >>>= 1, baseLimit = h, f, tab,
6262     rights, transformer, r, reducer)).fork();
6263     }
6264     for (Node<K,V> p; (p = advance()) != null; )
6265     r = reducer.applyAsInt(r, transformer.applyAsInt(p));
6266 dl 1.149 result = r;
6267     CountedCompleter<?> c;
6268     for (c = firstComplete(); c != null; c = c.nextComplete()) {
6269 dl 1.246 @SuppressWarnings("unchecked")
6270     MapReduceEntriesToIntTask<K,V>
6271 dl 1.149 t = (MapReduceEntriesToIntTask<K,V>)c,
6272     s = t.rights;
6273     while (s != null) {
6274 dl 1.153 t.result = reducer.applyAsInt(t.result, s.result);
6275 dl 1.149 s = t.rights = s.nextRight;
6276     }
6277 dl 1.119 }
6278 dl 1.138 }
6279 dl 1.4 }
6280 dl 1.119 }
6281 tim 1.1
6282 dl 1.222 @SuppressWarnings("serial")
6283 dl 1.210 static final class MapReduceMappingsToIntTask<K,V>
6284     extends BulkTask<K,V,Integer> {
6285 dl 1.171 final ToIntBiFunction<? super K, ? super V> transformer;
6286 dl 1.153 final IntBinaryOperator reducer;
6287 dl 1.119 final int basis;
6288     int result;
6289 dl 1.128 MapReduceMappingsToIntTask<K,V> rights, nextRight;
6290 dl 1.119 MapReduceMappingsToIntTask
6291 dl 1.210 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6292 dl 1.146 MapReduceMappingsToIntTask<K,V> nextRight,
6293 dl 1.171 ToIntBiFunction<? super K, ? super V> transformer,
6294 dl 1.119 int basis,
6295 dl 1.153 IntBinaryOperator reducer) {
6296 dl 1.210 super(p, b, i, f, t); this.nextRight = nextRight;
6297 dl 1.119 this.transformer = transformer;
6298     this.basis = basis; this.reducer = reducer;
6299     }
6300 dl 1.146 public final Integer getRawResult() { return result; }
6301 dl 1.210 public final void compute() {
6302 dl 1.171 final ToIntBiFunction<? super K, ? super V> transformer;
6303 dl 1.153 final IntBinaryOperator reducer;
6304 dl 1.149 if ((transformer = this.transformer) != null &&
6305     (reducer = this.reducer) != null) {
6306     int r = this.basis;
6307 dl 1.210 for (int i = baseIndex, f, h; batch > 0 &&
6308     (h = ((f = baseLimit) + i) >>> 1) > i;) {
6309     addToPendingCount(1);
6310 dl 1.149 (rights = new MapReduceMappingsToIntTask<K,V>
6311 dl 1.210 (this, batch >>>= 1, baseLimit = h, f, tab,
6312     rights, transformer, r, reducer)).fork();
6313     }
6314     for (Node<K,V> p; (p = advance()) != null; )
6315 dl 1.222 r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val));
6316 dl 1.149 result = r;
6317     CountedCompleter<?> c;
6318     for (c = firstComplete(); c != null; c = c.nextComplete()) {
6319 dl 1.246 @SuppressWarnings("unchecked")
6320     MapReduceMappingsToIntTask<K,V>
6321 dl 1.149 t = (MapReduceMappingsToIntTask<K,V>)c,
6322     s = t.rights;
6323     while (s != null) {
6324 dl 1.153 t.result = reducer.applyAsInt(t.result, s.result);
6325 dl 1.149 s = t.rights = s.nextRight;
6326     }
6327 dl 1.119 }
6328 dl 1.138 }
6329 tim 1.1 }
6330     }
6331 dl 1.99
    // Unsafe mechanics
    private static final Unsafe U = Unsafe.getUnsafe();
    private static final long SIZECTL;       // field offset of sizeCtl
    private static final long TRANSFERINDEX; // field offset of transferIndex
    private static final long BASECOUNT;     // field offset of baseCount
    private static final long CELLSBUSY;     // field offset of cellsBusy
    private static final long CELLVALUE;     // field offset of CounterCell.value
    private static final int ABASE;          // base offset of Node[] element storage
    private static final int ASHIFT;         // log2 of Node[] element scale, for index -> offset

    static {
        try {
            // Resolve raw field offsets once, for use with Unsafe-based
            // atomic accesses elsewhere in the class.
            SIZECTL = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("sizeCtl"));
            TRANSFERINDEX = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("transferIndex"));
            BASECOUNT = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("baseCount"));
            CELLSBUSY = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("cellsBusy"));

            CELLVALUE = U.objectFieldOffset
                (CounterCell.class.getDeclaredField("value"));

            ABASE = U.arrayBaseOffset(Node[].class);
            int scale = U.arrayIndexScale(Node[].class);
            // The scale must be a power of two so an element address can
            // be computed as ABASE + (i << ASHIFT).
            if ((scale & (scale - 1)) != 0)
                throw new Error("array index scale not a power of two");
            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
        } catch (ReflectiveOperationException e) {
            // Missing fields here indicate a JDK build inconsistency;
            // fail class initialization outright.
            throw new Error(e);
        }

        // Reduce the risk of rare disastrous classloading in first call to
        // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
        Class<?> ensureLoaded = LockSupport.class;
    }
}