root/jsr166/jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java
Revision: 1.137
Committed: Sun Oct 28 22:35:55 2012 UTC (11 years, 7 months ago) by dl
Branch: MAIN
Changes since 1.136: +889 -686 lines
Log Message:
Introduce ForkJoinPool.commonPool

File Contents

# User Rev Content
1 dl 1.2 /*
2     * Written by Doug Lea with assistance from members of JCP JSR-166
3 dl 1.36 * Expert Group and released to the public domain, as explained at
4 dl 1.100 * http://creativecommons.org/publicdomain/zero/1.0/
5 dl 1.2 */
6    
7 tim 1.1 package java.util.concurrent;
8 dl 1.119 import java.util.concurrent.atomic.LongAdder;
9     import java.util.concurrent.ForkJoinPool;
10     import java.util.concurrent.ForkJoinTask;
11    
12     import java.util.Comparator;
13     import java.util.Arrays;
14     import java.util.Map;
15     import java.util.Set;
16     import java.util.Collection;
17     import java.util.AbstractMap;
18     import java.util.AbstractSet;
19     import java.util.AbstractCollection;
20     import java.util.Hashtable;
21     import java.util.HashMap;
22     import java.util.Iterator;
23     import java.util.Enumeration;
24     import java.util.ConcurrentModificationException;
25     import java.util.NoSuchElementException;
26     import java.util.concurrent.ConcurrentMap;
27     import java.util.concurrent.ThreadLocalRandom;
28     import java.util.concurrent.locks.LockSupport;
29     import java.util.concurrent.locks.AbstractQueuedSynchronizer;
30     import java.util.concurrent.atomic.AtomicReference;
31    
32 tim 1.1 import java.io.Serializable;
33    
34     /**
35 dl 1.4 * A hash table supporting full concurrency of retrievals and
36 dl 1.119 * high expected concurrency for updates. This class obeys the
37 dl 1.22 * same functional specification as {@link java.util.Hashtable}, and
38 dl 1.19 * includes versions of methods corresponding to each method of
39 dl 1.119 * {@code Hashtable}. However, even though all operations are
40 dl 1.19 * thread-safe, retrieval operations do <em>not</em> entail locking,
41     * and there is <em>not</em> any support for locking the entire table
42     * in a way that prevents all access. This class is fully
43 dl 1.119 * interoperable with {@code Hashtable} in programs that rely on its
44 dl 1.4 * thread safety but not on its synchronization details.
45 tim 1.11 *
46 dl 1.119 * <p> Retrieval operations (including {@code get}) generally do not
47     * block, so may overlap with update operations (including {@code put}
48     * and {@code remove}). Retrievals reflect the results of the most
49     * recently <em>completed</em> update operations holding upon their
50 dl 1.126 * onset. (More formally, an update operation for a given key bears a
51     * <em>happens-before</em> relation with any (non-null) retrieval for
52     * that key reporting the updated value.) For aggregate operations
53     * such as {@code putAll} and {@code clear}, concurrent retrievals may
54     * reflect insertion or removal of only some entries. Similarly,
55     * Iterators and Enumerations return elements reflecting the state of
56     * the hash table at some point at or since the creation of the
57     * iterator/enumeration. They do <em>not</em> throw {@link
58     * ConcurrentModificationException}. However, iterators are designed
59     * to be used by only one thread at a time. Bear in mind that the
60     * results of aggregate status methods including {@code size}, {@code
61     * isEmpty}, and {@code containsValue} are typically useful only when
62     * a map is not undergoing concurrent updates in other threads.
63     * Otherwise the results of these methods reflect transient states
64     * that may be adequate for monitoring or estimation purposes, but not
65     * for program control.
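       *
       * <p> The memory-consistency property above can be illustrated with a
       * small sketch (this example is not part of the original documentation;
       * the key {@code "k"} and the array payload are arbitrary):
       *
       * <pre> {@code
       * ConcurrentHashMap<String, int[]> m = new ConcurrentHashMap<String, int[]>();
       * // Thread A
       * int[] data = new int[] { 42 };
       * m.put("k", data);            // writes to data happen-before the put
       * // Thread B
       * int[] d = m.get("k");
       * if (d != null)
       *   assert d[0] == 42;         // visible once get observes the mapping
       * }</pre>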
66 tim 1.1 *
67 dl 1.119 * <p> The table is dynamically expanded when there are too many
68     * collisions (i.e., keys that have distinct hash codes but fall into
69     * the same slot modulo the table size), with the expected average
70     * effect of maintaining roughly two bins per mapping (corresponding
71     * to a 0.75 load factor threshold for resizing). There may be much
72     * variance around this average as mappings are added and removed, but
73     * overall, this maintains a commonly accepted time/space tradeoff for
74     * hash tables. However, resizing this or any other kind of hash
75     * table may be a relatively slow operation. When possible, it is a
76     * good idea to provide a size estimate as an optional {@code
77     * initialCapacity} constructor argument. An additional optional
78     * {@code loadFactor} constructor argument provides a further means of
79     * customizing initial table capacity by specifying the table density
80     * to be used in calculating the amount of space to allocate for the
81     * given number of elements. Also, for compatibility with previous
82     * versions of this class, constructors may optionally specify an
83     * expected {@code concurrencyLevel} as an additional hint for
84     * internal sizing. Note that using many keys with exactly the same
85     * {@code hashCode()} is a sure way to slow down performance of any
86     * hash table.
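       *
       * <p> For example (an illustrative sketch, not part of the original
       * documentation; the sizes shown are arbitrary):
       *
       * <pre> {@code
       * // expect on the order of 10000 mappings; default load factor
       * ConcurrentHashMap<String, Long> m =
       *     new ConcurrentHashMap<String, Long>(10000);
       * // or also supply the (initial-capacity-only) load factor and a
       * // concurrencyLevel hint retained for compatibility
       * ConcurrentHashMap<String, Long> m2 =
       *     new ConcurrentHashMap<String, Long>(10000, 0.75f, 16);
       * }</pre>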
87 tim 1.1 *
88 dl 1.137 * <p> A {@link Set} projection of a ConcurrentHashMap may be created
89     * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
 90     * (using {@link #keySet(Object)}) when only keys are of interest, and the
91     * mapped values are (perhaps transiently) not used or all take the
92     * same mapping value.
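       *
       * <p> For example, a concurrent set of strings can be obtained as
       * follows (an illustrative sketch, not part of the original
       * documentation):
       *
       * <pre> {@code
       * Set<String> seen = ConcurrentHashMap.newKeySet();
       * boolean firstTime = seen.add("some-id"); // thread-safe test-and-insert
       * }</pre>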
93     *
 94     * <p> A ConcurrentHashMap can be used as a scalable frequency map (a
95     * form of histogram or multiset) by using {@link LongAdder} values
96     * and initializing via {@link #computeIfAbsent}. For example, to add
97     * a count to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you
 98     * can use {@code freqs.computeIfAbsent(k, x -> new
 99     * LongAdder()).increment();}
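       *
       * <p> A slightly fuller sketch of that idiom (illustrative only, not
       * part of the original documentation; the method names {@code record}
       * and {@code countOf} are arbitrary):
       *
       * <pre> {@code
       * ConcurrentHashMap<String, LongAdder> freqs =
       *     new ConcurrentHashMap<String, LongAdder>();
       *
       * void record(String k) {
       *   // creates the LongAdder on first use, then increments it
       *   freqs.computeIfAbsent(k, x -> new LongAdder()).increment();
       * }
       *
       * long countOf(String k) {
       *   LongAdder a = freqs.get(k);
       *   return (a == null) ? 0L : a.sum();
       * }
       * }</pre>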
100     *
101 dl 1.45 * <p>This class and its views and iterators implement all of the
102     * <em>optional</em> methods of the {@link Map} and {@link Iterator}
103     * interfaces.
104 dl 1.23 *
105 jsr166 1.68 * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class
106 dl 1.119 * does <em>not</em> allow {@code null} to be used as a key or value.
107 tim 1.1 *
108 dl 1.137 * <p>ConcurrentHashMaps support parallel operations using the {@link
 109     * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts
110     * are available in class {@link ForkJoinTasks}). These operations are
111     * designed to be safely, and often sensibly, applied even with maps
112     * that are being concurrently updated by other threads; for example,
113     * when computing a snapshot summary of the values in a shared
114     * registry. There are three kinds of operation, each with four
115     * forms, accepting functions with Keys, Values, Entries, and (Key,
116     * Value) arguments and/or return values. Because the elements of a
117     * ConcurrentHashMap are not ordered in any particular way, and may be
118     * processed in different orders in different parallel executions, the
119     * correctness of supplied functions should not depend on any
120     * ordering, or on any other objects or values that may transiently
121     * change while computation is in progress; and except for forEach
122     * actions, should ideally be side-effect-free.
123     *
124     * <ul>
125     * <li> forEach: Perform a given action on each element.
126     * A variant form applies a given transformation on each element
127     * before performing the action.</li>
128     *
129     * <li> search: Return the first available non-null result of
130     * applying a given function on each element; skipping further
131     * search when a result is found.</li>
132     *
133     * <li> reduce: Accumulate each element. The supplied reduction
134     * function cannot rely on ordering (more formally, it should be
135     * both associative and commutative). There are five variants:
136     *
137     * <ul>
138     *
139     * <li> Plain reductions. (There is not a form of this method for
140     * (key, value) function arguments since there is no corresponding
141     * return type.)</li>
142     *
143     * <li> Mapped reductions that accumulate the results of a given
144     * function applied to each element.</li>
145     *
146     * <li> Reductions to scalar doubles, longs, and ints, using a
147     * given basis value.</li>
148     *
 149     * </ul>
 150     * </li>
151     * </ul>
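       *
       * <p> An illustrative sketch only (not part of the original
       * documentation): the method names {@code forEachValue} and
       * {@code reduceValues}, and the use of lambda syntax against the
       * transitional functional interfaces, are assumptions and may differ
       * from the actual API of this version.
       *
       * <pre> {@code
       * ConcurrentHashMap<String, Long> m = ...
       * // forEach: apply an action to each value (in no particular order)
       * m.forEachValue(v -> System.out.println(v));
       * // reduce: combine values with an associative, commutative function;
       * // null is the result when the map is empty
       * Long total = m.reduceValues((a, b) -> a + b);
       * }</pre>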
152     *
153     * <p>The concurrency properties of bulk operations follow
154     * from those of ConcurrentHashMap: Any non-null result returned
155     * from {@code get(key)} and related access methods bears a
156     * happens-before relation with the associated insertion or
157     * update. The result of any bulk operation reflects the
158     * composition of these per-element relations (but is not
159     * necessarily atomic with respect to the map as a whole unless it
160     * is somehow known to be quiescent). Conversely, because keys
161     * and values in the map are never null, null serves as a reliable
162     * atomic indicator of the current lack of any result. To
163     * maintain this property, null serves as an implicit basis for
164     * all non-scalar reduction operations. For the double, long, and
165     * int versions, the basis should be one that, when combined with
166     * any other value, returns that other value (more formally, it
167     * should be the identity element for the reduction). Most common
168     * reductions have these properties; for example, computing a sum
169     * with basis 0 or a minimum with basis MAX_VALUE.
170     *
171     * <p>Search and transformation functions provided as arguments
172     * should similarly return null to indicate the lack of any result
173     * (in which case it is not used). In the case of mapped
174     * reductions, this also enables transformations to serve as
175     * filters, returning null (or, in the case of primitive
176     * specializations, the identity basis) if the element should not
177     * be combined. You can create compound transformations and
178     * filterings by composing them yourself under this "null means
179     * there is nothing there now" rule before using them in search or
180     * reduce operations.
181     *
182     * <p>Methods accepting and/or returning Entry arguments maintain
183     * key-value associations. They may be useful for example when
184     * finding the key for the greatest value. Note that "plain" Entry
185     * arguments can be supplied using {@code new
186     * AbstractMap.SimpleEntry(k,v)}.
187     *
188     * <p> Bulk operations may complete abruptly, throwing an
189     * exception encountered in the application of a supplied
190     * function. Bear in mind when handling such exceptions that other
191     * concurrently executing functions could also have thrown
192     * exceptions, or would have done so if the first exception had
193     * not occurred.
194     *
195     * <p>Parallel speedups for bulk operations compared to sequential
196     * processing are common but not guaranteed. Operations involving
197     * brief functions on small maps may execute more slowly than
198     * sequential loops if the underlying work to parallelize the
199     * computation is more expensive than the computation
200     * itself. Similarly, parallelization may not lead to much actual
201     * parallelism if all processors are busy performing unrelated tasks.
202     *
203     * <p> All arguments to all task methods must be non-null.
204     *
205     * <p><em>jsr166e note: During transition, this class
206     * uses nested functional interfaces with different names but the
 207     * same forms as those expected for JDK8.</em>
208     *
209 dl 1.42 * <p>This class is a member of the
210 jsr166 1.88 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
211 dl 1.42 * Java Collections Framework</a>.
212     *
213 dl 1.8 * @since 1.5
214     * @author Doug Lea
215 dl 1.27 * @param <K> the type of keys maintained by this map
216 jsr166 1.64 * @param <V> the type of mapped values
217 dl 1.8 */
218 dl 1.119 public class ConcurrentHashMap<K, V>
219     implements ConcurrentMap<K, V>, Serializable {
220 dl 1.20 private static final long serialVersionUID = 7249069246763182397L;
221 tim 1.1
222 dl 1.119 /**
223     * A partitionable iterator. A Spliterator can be traversed
224     * directly, but can also be partitioned (before traversal) by
225     * creating another Spliterator that covers a non-overlapping
226     * portion of the elements, and so may be amenable to parallel
227     * execution.
228     *
229     * <p> This interface exports a subset of expected JDK8
230     * functionality.
231     *
 232     * <p>Sample usage: Here is one (of several) ways to compute
233     * the sum of the values held in a map using the ForkJoin
234     * framework. As illustrated here, Spliterators are well suited to
235     * designs in which a task repeatedly splits off half its work
236     * into forked subtasks until small enough to process directly,
237     * and then joins these subtasks. Variants of this style can also
238     * be used in completion-based designs.
239     *
240     * <pre>
241     * {@code ConcurrentHashMap<String, Long> m = ...
242     * // split as if have 8 * parallelism, for load balance
243     * int n = m.size();
244     * int p = aForkJoinPool.getParallelism() * 8;
245     * int split = (n < p)? n : p;
246     * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
247     * // ...
 248     * static class SumValues extends RecursiveTask<Long> {
 249     *   final Spliterator<Long> s;
 250     *   final int split;             // split while > 1
 251     *   final SumValues nextJoin;    // records forked subtasks to join
 252     *   SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
 253     *     this.s = s; this.split = split; this.nextJoin = nextJoin;
 254     *   }
 255     *   public Long compute() {
 256     *     long sum = 0;
 257     *     SumValues subtasks = null; // fork subtasks
 258     *     for (int sp = split >>> 1; sp > 0; sp >>>= 1)
 259     *       (subtasks = new SumValues(s.split(), sp, subtasks)).fork();
 260     *     while (s.hasNext())        // directly process remaining elements
 261     *       sum += s.next();
 262     *     for (SumValues t = subtasks; t != null; t = t.nextJoin)
 263     *       sum += t.join();         // collect subtask results
 264     *     return sum;
 265     *   }
 266     * }
267     * }</pre>
268     */
269     public static interface Spliterator<T> extends Iterator<T> {
270     /**
271     * Returns a Spliterator covering approximately half of the
272     * elements, guaranteed not to overlap with those subsequently
273     * returned by this Spliterator. After invoking this method,
274     * the current Spliterator will <em>not</em> produce any of
275     * the elements of the returned Spliterator, but the two
276     * Spliterators together will produce all of the elements that
277     * would have been produced by this Spliterator had this
278     * method not been called. The exact number of elements
279     * produced by the returned Spliterator is not guaranteed, and
280     * may be zero (i.e., with {@code hasNext()} reporting {@code
281     * false}) if this Spliterator cannot be further split.
282     *
283     * @return a Spliterator covering approximately half of the
284     * elements
285     * @throws IllegalStateException if this Spliterator has
286     * already commenced traversing elements
287     */
288     Spliterator<T> split();
289     }
290    
291 dl 1.137 /**
292     * A view of a ConcurrentHashMap as a {@link Set} of keys, in
293     * which additions may optionally be enabled by mapping to a
294     * common value. This class cannot be directly instantiated. See
295     * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()},
296     * {@link #newKeySet(int)}.
297     *
298     * <p>The view's {@code iterator} is a "weakly consistent" iterator
299     * that will never throw {@link ConcurrentModificationException},
300     * and guarantees to traverse elements as they existed upon
301     * construction of the iterator, and may (but is not guaranteed to)
302     * reflect any modifications subsequent to construction.
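       *
       * <p> For example (an illustrative sketch, not part of the original
       * comment; it relies on the add-with-default-value behavior described
       * above):
       *
       * <pre> {@code
       * ConcurrentHashMap<String, Boolean> map =
       *     new ConcurrentHashMap<String, Boolean>();
       * ConcurrentHashMap.KeySetView<String, Boolean> keys = map.keySet(Boolean.TRUE);
       * keys.add("alpha");               // if absent, maps "alpha" to Boolean.TRUE
       * boolean present = keys.contains("alpha");
       * }</pre>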
303     */
304     public static class KeySetView<K,V> extends CHMView<K,V> implements Set<K>, java.io.Serializable {
305     private static final long serialVersionUID = 7249069246763182397L;
306     private final V value;
307     KeySetView(ConcurrentHashMap<K, V> map, V value) { // non-public
308     super(map);
309     this.value = value;
310     }
311    
312     /**
313     * Returns the map backing this view.
314     *
315     * @return the map backing this view
316     */
317     public ConcurrentHashMap<K,V> getMap() { return map; }
318    
319     /**
320     * Returns the default mapped value for additions,
321     * or {@code null} if additions are not supported.
322     *
323     * @return the default mapped value for additions, or {@code null}
324     * if not supported.
325     */
326     public V getMappedValue() { return value; }
327    
328     // implement Set API
329    
330     public boolean contains(Object o) { return map.containsKey(o); }
331     public boolean remove(Object o) { return map.remove(o) != null; }
332     public Iterator<K> iterator() { return new KeyIterator<K,V>(map); }
333     public boolean add(K e) {
334     V v;
335     if ((v = value) == null)
336     throw new UnsupportedOperationException();
337     if (e == null)
338     throw new NullPointerException();
339     return map.internalPutIfAbsent(e, v) == null;
340     }
341     public boolean addAll(Collection<? extends K> c) {
342     boolean added = false;
343     V v;
344     if ((v = value) == null)
345     throw new UnsupportedOperationException();
346     for (K e : c) {
347     if (e == null)
348     throw new NullPointerException();
349     if (map.internalPutIfAbsent(e, v) == null)
350     added = true;
351     }
352     return added;
353     }
354     public boolean equals(Object o) {
355     Set<?> c;
356     return ((o instanceof Set) &&
357     ((c = (Set<?>)o) == this ||
358     (containsAll(c) && c.containsAll(this))));
359     }
360     }
361    
362 tim 1.1 /*
363 dl 1.119 * Overview:
364     *
365     * The primary design goal of this hash table is to maintain
366     * concurrent readability (typically method get(), but also
367     * iterators and related methods) while minimizing update
368     * contention. Secondary goals are to keep space consumption about
369     * the same or better than java.util.HashMap, and to support high
370     * initial insertion rates on an empty table by many threads.
371     *
372     * Each key-value mapping is held in a Node. Because Node fields
373     * can contain special values, they are defined using plain Object
374     * types. Similarly in turn, all internal methods that use them
375     * work off Object types. And similarly, so do the internal
376     * methods of auxiliary iterator and view classes. All public
377     * generic typed methods relay in/out of these internal methods,
378     * supplying null-checks and casts as needed. This also allows
379     * many of the public methods to be factored into a smaller number
380     * of internal methods (although sadly not so for the five
381     * variants of put-related operations). The validation-based
382     * approach explained below leads to a lot of code sprawl because
383     * retry-control precludes factoring into smaller methods.
384     *
385     * The table is lazily initialized to a power-of-two size upon the
386     * first insertion. Each bin in the table normally contains a
387     * list of Nodes (most often, the list has only zero or one Node).
388     * Table accesses require volatile/atomic reads, writes, and
389     * CASes. Because there is no other way to arrange this without
390     * adding further indirections, we use intrinsics
391     * (sun.misc.Unsafe) operations. The lists of nodes within bins
392     * are always accurately traversable under volatile reads, so long
393     * as lookups check hash code and non-nullness of value before
394     * checking key equality.
395     *
396     * We use the top two bits of Node hash fields for control
397     * purposes -- they are available anyway because of addressing
398     * constraints. As explained further below, these top bits are
399     * used as follows:
400     * 00 - Normal
401     * 01 - Locked
402     * 11 - Locked and may have a thread waiting for lock
403     * 10 - Node is a forwarding node
404     *
405     * The lower 30 bits of each Node's hash field contain a
406     * transformation of the key's hash code, except for forwarding
407     * nodes, for which the lower bits are zero (and so always have
408     * hash field == MOVED).
409     *
410     * Insertion (via put or its variants) of the first node in an
411     * empty bin is performed by just CASing it to the bin. This is
412     * by far the most common case for put operations under most
413     * key/hash distributions. Other update operations (insert,
414     * delete, and replace) require locks. We do not want to waste
415     * the space required to associate a distinct lock object with
416     * each bin, so instead use the first node of a bin list itself as
417     * a lock. Blocking support for these locks relies on the builtin
418     * "synchronized" monitors. However, we also need a tryLock
419     * construction, so we overlay these by using bits of the Node
420     * hash field for lock control (see above), and so normally use
421     * builtin monitors only for blocking and signalling using
422     * wait/notifyAll constructions. See Node.tryAwaitLock.
423     *
424     * Using the first node of a list as a lock does not by itself
425     * suffice though: When a node is locked, any update must first
426     * validate that it is still the first node after locking it, and
427     * retry if not. Because new nodes are always appended to lists,
428     * once a node is first in a bin, it remains first until deleted
429     * or the bin becomes invalidated (upon resizing). However,
430     * operations that only conditionally update may inspect nodes
431     * until the point of update. This is a converse of sorts to the
432     * lazy locking technique described by Herlihy & Shavit.
433     *
434     * The main disadvantage of per-bin locks is that other update
435     * operations on other nodes in a bin list protected by the same
436     * lock can stall, for example when user equals() or mapping
437     * functions take a long time. However, statistically, under
438     * random hash codes, this is not a common problem. Ideally, the
439     * frequency of nodes in bins follows a Poisson distribution
440     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
441     * parameter of about 0.5 on average, given the resizing threshold
442     * of 0.75, although with a large variance because of resizing
443     * granularity. Ignoring variance, the expected occurrences of
444     * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
445     * first values are:
446     *
447     * 0: 0.60653066
448     * 1: 0.30326533
449     * 2: 0.07581633
450     * 3: 0.01263606
451     * 4: 0.00157952
452     * 5: 0.00015795
453     * 6: 0.00001316
454     * 7: 0.00000094
455     * 8: 0.00000006
456     * more: less than 1 in ten million
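     *
     * (Illustrative note, not in the original comment: the figures above
     * follow the Poisson recurrence p(0) = exp(-0.5), p(k+1) = p(k) * 0.5 /
     * (k + 1). A quick check:
     *
     *   double p = Math.exp(-0.5);
     *   for (int k = 0; k <= 8; ++k) {
     *     System.out.printf("%d: %.8f%n", k, p);
     *     p *= 0.5 / (k + 1);
     *   }
     *
     * reproduces the table, to rounding.)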
457     *
458     * Lock contention probability for two threads accessing distinct
459     * elements is roughly 1 / (8 * #elements) under random hashes.
460     *
461     * Actual hash code distributions encountered in practice
462     * sometimes deviate significantly from uniform randomness. This
463     * includes the case when N > (1<<30), so some keys MUST collide.
464     * Similarly for dumb or hostile usages in which multiple keys are
465     * designed to have identical hash codes. Also, although we guard
466     * against the worst effects of this (see method spread), sets of
467     * hashes may differ only in bits that do not impact their bin
468     * index for a given power-of-two mask. So we use a secondary
469     * strategy that applies when the number of nodes in a bin exceeds
470     * a threshold, and at least one of the keys implements
471     * Comparable. These TreeBins use a balanced tree to hold nodes
472     * (a specialized form of red-black trees), bounding search time
473     * to O(log N). Each search step in a TreeBin is around twice as
474     * slow as in a regular list, but given that N cannot exceed
475     * (1<<64) (before running out of addresses) this bounds search
476     * steps, lock hold times, etc, to reasonable constants (roughly
477     * 100 nodes inspected per operation worst case) so long as keys
478     * are Comparable (which is very common -- String, Long, etc).
479     * TreeBin nodes (TreeNodes) also maintain the same "next"
480     * traversal pointers as regular nodes, so can be traversed in
481     * iterators in the same way.
482     *
483     * The table is resized when occupancy exceeds a percentage
484     * threshold (nominally, 0.75, but see below). Only a single
485     * thread performs the resize (using field "sizeCtl", to arrange
486     * exclusion), but the table otherwise remains usable for reads
487     * and updates. Resizing proceeds by transferring bins, one by
488     * one, from the table to the next table. Because we are using
489     * power-of-two expansion, the elements from each bin must either
490     * stay at same index, or move with a power of two offset. We
491     * eliminate unnecessary node creation by catching cases where old
492     * nodes can be reused because their next fields won't change. On
493     * average, only about one-sixth of them need cloning when a table
494     * doubles. The nodes they replace will be garbage collectable as
495     * soon as they are no longer referenced by any reader thread that
 496     * may be in the midst of concurrently traversing the table. Upon
497     * transfer, the old table bin contains only a special forwarding
498     * node (with hash field "MOVED") that contains the next table as
499     * its key. On encountering a forwarding node, access and update
500     * operations restart, using the new table.
501     *
502     * Each bin transfer requires its bin lock. However, unlike other
503     * cases, a transfer can skip a bin if it fails to acquire its
504     * lock, and revisit it later (unless it is a TreeBin). Method
505     * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that
506     * have been skipped because of failure to acquire a lock, and
507     * blocks only if none are available (i.e., only very rarely).
508     * The transfer operation must also ensure that all accessible
509     * bins in both the old and new table are usable by any traversal.
510     * When there are no lock acquisition failures, this is arranged
511     * simply by proceeding from the last bin (table.length - 1) up
512     * towards the first. Upon seeing a forwarding node, traversals
513     * (see class Iter) arrange to move to the new table
514     * without revisiting nodes. However, when any node is skipped
515     * during a transfer, all earlier table bins may have become
516     * visible, so are initialized with a reverse-forwarding node back
517     * to the old table until the new ones are established. (This
518     * sometimes requires transiently locking a forwarding node, which
519     * is possible under the above encoding.) These more expensive
520     * mechanics trigger only when necessary.
521     *
522     * The traversal scheme also applies to partial traversals of
523     * ranges of bins (via an alternate Traverser constructor)
524     * to support partitioned aggregate operations. Also, read-only
525     * operations give up if ever forwarded to a null table, which
526     * provides support for shutdown-style clearing, which is also not
527     * currently implemented.
528     *
529     * Lazy table initialization minimizes footprint until first use,
530     * and also avoids resizings when the first operation is from a
531     * putAll, constructor with map argument, or deserialization.
532     * These cases attempt to override the initial capacity settings,
533     * but harmlessly fail to take effect in cases of races.
534     *
535     * The element count is maintained using a LongAdder, which avoids
536     * contention on updates but can encounter cache thrashing if read
537     * too frequently during concurrent access. To avoid reading so
538     * often, resizing is attempted either when a bin lock is
539     * contended, or upon adding to a bin already holding two or more
540     * nodes (checked before adding in the xIfAbsent methods, after
541     * adding in others). Under uniform hash distributions, the
542     * probability of this occurring at threshold is around 13%,
543     * meaning that only about 1 in 8 puts check threshold (and after
544     * resizing, many fewer do so). But this approximation has high
545     * variance for small table sizes, so we check on any collision
546     * for sizes <= 64. The bulk putAll operation further reduces
547     * contention by only committing count updates upon these size
548     * checks.
549     *
550     * Maintaining API and serialization compatibility with previous
551     * versions of this class introduces several oddities. Mainly: We
 552     * leave untouched but unused constructor arguments referring to
553     * concurrencyLevel. We accept a loadFactor constructor argument,
554     * but apply it only to initial table capacity (which is the only
555     * time that we can guarantee to honor it.) We also declare an
556     * unused "Segment" class that is instantiated in minimal form
557     * only when serializing.
558 dl 1.4 */
559 tim 1.1
560 dl 1.4 /* ---------------- Constants -------------- */
561 tim 1.11
562 dl 1.4 /**
563 dl 1.119 * The largest possible table capacity. This value must be
564     * exactly 1<<30 to stay within Java array allocation and indexing
565     * bounds for power of two table sizes, and is further required
566     * because the top two bits of 32bit hash fields are used for
567     * control purposes.
568 dl 1.4 */
569 dl 1.119 private static final int MAXIMUM_CAPACITY = 1 << 30;
570 dl 1.56
571     /**
572 dl 1.119 * The default initial table capacity. Must be a power of 2
573     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
574 dl 1.56 */
575 dl 1.119 private static final int DEFAULT_CAPACITY = 16;
576 dl 1.56
577     /**
578 dl 1.119 * The largest possible (non-power of two) array size.
579     * Needed by toArray and related methods.
580 jsr166 1.59 */
581 dl 1.119 static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
582 tim 1.1
583     /**
584 dl 1.119 * The default concurrency level for this table. Unused but
585     * defined for compatibility with previous versions of this class.
586 dl 1.4 */
587 dl 1.119 private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
588 tim 1.11
589 tim 1.1 /**
590 dl 1.119 * The load factor for this table. Overrides of this value in
591     * constructors affect only the initial table capacity. The
592     * actual floating point value isn't normally used -- it is
593     * simpler to use expressions such as {@code n - (n >>> 2)} for
594     * the associated resizing threshold.
595 dl 1.99 */
596 dl 1.119 private static final float LOAD_FACTOR = 0.75f;
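    // Illustrative note (not in the original source): for a table of length
    // n = 16, the threshold expression n - (n >>> 2) == 16 - 4 == 12, the
    // same value as 0.75f * 16.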
597 dl 1.99
598     /**
599 dl 1.119 * The buffer size for skipped bins during transfers. The
600     * value is arbitrary but should be large enough to avoid
601     * most locking stalls during resizes.
602 dl 1.21 */
603 dl 1.119 private static final int TRANSFER_BUFFER_SIZE = 32;
604 dl 1.21
605 dl 1.46 /**
606 dl 1.119 * The bin count threshold for using a tree rather than list for a
607     * bin. The value reflects the approximate break-even point for
608     * using tree-based operations.
609     */
610     private static final int TREE_THRESHOLD = 8;
611    
612     /*
613     * Encodings for special uses of Node hash fields. See above for
614     * explanation.
615 dl 1.46 */
616 dl 1.119 static final int MOVED = 0x80000000; // hash field for forwarding nodes
617     static final int LOCKED = 0x40000000; // set/tested only as a bit
618     static final int WAITING = 0xc0000000; // both bits set/tested together
619     static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash
620 dl 1.46
621 dl 1.4 /* ---------------- Fields -------------- */
622 tim 1.1
623     /**
624 dl 1.119 * The array of bins. Lazily initialized upon first insertion.
625     * Size is always a power of two. Accessed directly by iterators.
626 jsr166 1.59 */
627 dl 1.119 transient volatile Node[] table;
628 tim 1.1
629     /**
630 dl 1.119 * The counter maintaining number of elements.
631 jsr166 1.59 */
632 dl 1.119 private transient final LongAdder counter;
633 tim 1.1
634     /**
635 dl 1.119 * Table initialization and resizing control. When negative, the
636     * table is being initialized or resized. Otherwise, when table is
637     * null, holds the initial table size to use upon creation, or 0
638     * for default. After initialization, holds the next element count
639     * value upon which to resize the table.
640 tim 1.1 */
641 dl 1.119 private transient volatile int sizeCtl;
642 dl 1.4
643 dl 1.119 // views
644 dl 1.137 private transient KeySetView<K,V> keySet;
645 dl 1.119 private transient Values<K,V> values;
646     private transient EntrySet<K,V> entrySet;
647    
648     /** For serialization compatibility. Null unless serialized; see below */
649     private Segment<K,V>[] segments;
650    
651     /* ---------------- Table element access -------------- */
652    
653     /*
654     * Volatile access methods are used for table elements as well as
655     * elements of in-progress next table while resizing. Uses are
656     * null checked by callers, and implicitly bounds-checked, relying
657     * on the invariants that tab arrays have non-zero size, and all
658     * indices are masked with (tab.length - 1) which is never
659     * negative and always less than length. Note that, to be correct
660     * wrt arbitrary concurrency errors by users, bounds checks must
661     * operate on local variables, which accounts for some odd-looking
662     * inline assignments below.
663     */
664    
665     static final Node tabAt(Node[] tab, int i) { // used by Iter
666     return (Node)UNSAFE.getObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE);
667     }
668    
669     private static final boolean casTabAt(Node[] tab, int i, Node c, Node v) {
670     return UNSAFE.compareAndSwapObject(tab, ((long)i<<ASHIFT)+ABASE, c, v);
671     }
672    
673     private static final void setTabAt(Node[] tab, int i, Node v) {
674     UNSAFE.putObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE, v);
675     }
676    
677     /* ---------------- Nodes -------------- */
678 dl 1.4
679 dl 1.99 /**
680 dl 1.119 * Key-value entry. Note that this is never exported out as a
681     * user-visible Map.Entry (see MapEntry below). Nodes with a hash
682     * field of MOVED are special, and do not contain user keys or
683     * values. Otherwise, keys are never null, and null val fields
684     * indicate that a node is in the process of being deleted or
685     * created. For purposes of read-only access, a key may be read
686     * before a val, but can only be used after checking val to be
687     * non-null.
688 dl 1.99 */
689 dl 1.119 static class Node {
690     volatile int hash;
691     final Object key;
692     volatile Object val;
693     volatile Node next;
694 dl 1.99
695 dl 1.119 Node(int hash, Object key, Object val, Node next) {
696 dl 1.99 this.hash = hash;
697     this.key = key;
698 dl 1.119 this.val = val;
699 dl 1.99 this.next = next;
700     }
701    
702 dl 1.119 /** CompareAndSet the hash field */
703     final boolean casHash(int cmp, int val) {
704     return UNSAFE.compareAndSwapInt(this, hashOffset, cmp, val);
705     }
706    
707     /** The number of spins before blocking for a lock */
708     static final int MAX_SPINS =
709     Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
710    
711 dl 1.99 /**
712 dl 1.119 * Spins a while if LOCKED bit set and this node is the first
713     * of its bin, and then sets WAITING bits on hash field and
714     * blocks (once) if they are still set. It is OK for this
715     * method to return even if lock is not available upon exit,
716     * which enables these simple single-wait mechanics.
717     *
718     * The corresponding signalling operation is performed within
719     * callers: Upon detecting that WAITING has been set when
720     * unlocking lock (via a failed CAS from non-waiting LOCKED
721     * state), unlockers acquire the sync lock and perform a
722     * notifyAll.
723 dl 1.128 *
724     * The initial sanity check on tab and bounds is not currently
725     * necessary in the only usages of this method, but enables
726     * use in other future contexts.
727 dl 1.99 */
728 dl 1.119 final void tryAwaitLock(Node[] tab, int i) {
729 dl 1.128 if (tab != null && i >= 0 && i < tab.length) { // sanity check
730 dl 1.119 int r = ThreadLocalRandom.current().nextInt(); // randomize spins
731     int spins = MAX_SPINS, h;
732     while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) {
733     if (spins >= 0) {
734     r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
735     if (r >= 0 && --spins == 0)
736     Thread.yield(); // yield before block
737     }
738     else if (casHash(h, h | WAITING)) {
739     synchronized (this) {
740     if (tabAt(tab, i) == this &&
741     (hash & WAITING) == WAITING) {
742     try {
743     wait();
744     } catch (InterruptedException ie) {
745     Thread.currentThread().interrupt();
746     }
747     }
748     else
749     notifyAll(); // possibly won race vs signaller
750     }
751     break;
752     }
753     }
754     }
755 dl 1.99 }
756    
757 dl 1.119 // Unsafe mechanics for casHash
758     private static final sun.misc.Unsafe UNSAFE;
759     private static final long hashOffset;
760    
761 dl 1.99 static {
762     try {
763     UNSAFE = sun.misc.Unsafe.getUnsafe();
764 dl 1.119 Class<?> k = Node.class;
765     hashOffset = UNSAFE.objectFieldOffset
766     (k.getDeclaredField("hash"));
767 dl 1.99 } catch (Exception e) {
768     throw new Error(e);
769     }
770     }
771     }
772    
773 dl 1.119 /* ---------------- TreeBins -------------- */
774    
775 dl 1.99 /**
776 dl 1.119 * Nodes for use in TreeBins
777 dl 1.99 */
778 dl 1.119 static final class TreeNode extends Node {
779     TreeNode parent; // red-black tree links
780     TreeNode left;
781     TreeNode right;
782     TreeNode prev; // needed to unlink next upon deletion
783     boolean red;
784 dl 1.99
785 dl 1.119 TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) {
786     super(hash, key, val, next);
787     this.parent = parent;
788     }
789 dl 1.99 }
790 tim 1.1
791     /**
792 dl 1.119 * A specialized form of red-black tree for use in bins
793     * whose size exceeds a threshold.
794     *
795     * TreeBins use a special form of comparison for search and
796     * related operations (which is the main reason we cannot use
797     * existing collections such as TreeMaps). TreeBins contain
798     * Comparable elements, but may contain others, as well as
799     * elements that are Comparable but not necessarily Comparable<T>
800     * for the same T, so we cannot invoke compareTo among them. To
801     * handle this, the tree is ordered primarily by hash value, then
802     * by getClass().getName() order, and then by Comparator order
803     * among elements of the same class. On lookup at a node, if
804     * elements are not comparable or compare as 0, both left and
805     * right children may need to be searched in the case of tied hash
806     * values. (This corresponds to the full list search that would be
807     * necessary if all elements were non-Comparable and had tied
808     * hashes.) The red-black balancing code is updated from
809     * pre-jdk-collections
810     * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
811     * based in turn on Cormen, Leiserson, and Rivest "Introduction to
812     * Algorithms" (CLR).
813     *
 814     * TreeBins also maintain a separate locking discipline from
815     * regular bins. Because they are forwarded via special MOVED
816     * nodes at bin heads (which can never change once established),
817     * we cannot use those nodes as locks. Instead, TreeBin
818     * extends AbstractQueuedSynchronizer to support a simple form of
819     * read-write lock. For update operations and table validation,
820     * the exclusive form of lock behaves in the same way as bin-head
821     * locks. However, lookups use shared read-lock mechanics to allow
822     * multiple readers in the absence of writers. Additionally,
823     * these lookups do not ever block: While the lock is not
824     * available, they proceed along the slow traversal path (via
825     * next-pointers) until the lock becomes available or the list is
826     * exhausted, whichever comes first. (These cases are not fast,
827     * but maximize aggregate expected throughput.) The AQS mechanics
828     * for doing this are straightforward. The lock state is held as
829     * AQS getState(). Read counts are negative; the write count (1)
830     * is positive. There are no signalling preferences among readers
831     * and writers. Since we don't need to export full Lock API, we
832     * just override the minimal AQS methods and use them directly.
833     */
834     static final class TreeBin extends AbstractQueuedSynchronizer {
835 dl 1.24 private static final long serialVersionUID = 2249069246763182397L;
836 dl 1.119 transient TreeNode root; // root of tree
837     transient TreeNode first; // head of next-pointer list
838 dl 1.24
839 dl 1.119 /* AQS overrides */
840     public final boolean isHeldExclusively() { return getState() > 0; }
841     public final boolean tryAcquire(int ignore) {
842     if (compareAndSetState(0, 1)) {
843     setExclusiveOwnerThread(Thread.currentThread());
844     return true;
845     }
846     return false;
847     }
848     public final boolean tryRelease(int ignore) {
849     setExclusiveOwnerThread(null);
850     setState(0);
851     return true;
852     }
853     public final int tryAcquireShared(int ignore) {
854     for (int c;;) {
855     if ((c = getState()) > 0)
856     return -1;
 857     if (compareAndSetState(c, c - 1))
858     return 1;
859     }
860     }
861     public final boolean tryReleaseShared(int ignore) {
862     int c;
863     do {} while (!compareAndSetState(c = getState(), c + 1));
864     return c == -1;
865     }
866    
867     /** From CLR */
868     private void rotateLeft(TreeNode p) {
869     if (p != null) {
870     TreeNode r = p.right, pp, rl;
871     if ((rl = p.right = r.left) != null)
872     rl.parent = p;
873     if ((pp = r.parent = p.parent) == null)
874     root = r;
875     else if (pp.left == p)
876     pp.left = r;
877     else
878     pp.right = r;
879     r.left = p;
880     p.parent = r;
881     }
882     }
883 dl 1.4
884 dl 1.119 /** From CLR */
885     private void rotateRight(TreeNode p) {
886     if (p != null) {
887     TreeNode l = p.left, pp, lr;
888     if ((lr = p.left = l.right) != null)
889     lr.parent = p;
890     if ((pp = l.parent = p.parent) == null)
891     root = l;
892     else if (pp.right == p)
893     pp.right = l;
894     else
895     pp.left = l;
896     l.right = p;
897     p.parent = l;
898     }
899     }
900 dl 1.4
901     /**
902 jsr166 1.123 * Returns the TreeNode (or null if not found) for the given key
903 dl 1.119 * starting at given root.
904 dl 1.4 */
905 dl 1.128 @SuppressWarnings("unchecked") final TreeNode getTreeNode
906     (int h, Object k, TreeNode p) {
907 dl 1.119 Class<?> c = k.getClass();
908     while (p != null) {
909     int dir, ph; Object pk; Class<?> pc;
910     if ((ph = p.hash) == h) {
911     if ((pk = p.key) == k || k.equals(pk))
912     return p;
913     if (c != (pc = pk.getClass()) ||
914     !(k instanceof Comparable) ||
915     (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
916     dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
917     TreeNode r = null, s = null, pl, pr;
918     if (dir >= 0) {
919     if ((pl = p.left) != null && h <= pl.hash)
920     s = pl;
921 dl 1.99 }
922 dl 1.119 else if ((pr = p.right) != null && h >= pr.hash)
923     s = pr;
924     if (s != null && (r = getTreeNode(h, k, s)) != null)
925     return r;
926 dl 1.45 }
927     }
928 dl 1.119 else
929     dir = (h < ph) ? -1 : 1;
930     p = (dir > 0) ? p.right : p.left;
931 dl 1.33 }
932 dl 1.119 return null;
933 dl 1.33 }
934    
935 dl 1.99 /**
936 dl 1.119 * Wrapper for getTreeNode used by CHM.get. Tries to obtain
937     * read-lock to call getTreeNode, but during failure to get
938     * lock, searches along next links.
939 dl 1.99 */
940 dl 1.119 final Object getValue(int h, Object k) {
941     Node r = null;
942     int c = getState(); // Must read lock state first
943     for (Node e = first; e != null; e = e.next) {
944     if (c <= 0 && compareAndSetState(c, c - 1)) {
945     try {
946     r = getTreeNode(h, k, root);
947     } finally {
948     releaseShared(0);
949 dl 1.99 }
950     break;
951     }
952 dl 1.119 else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) {
953     r = e;
954 dl 1.99 break;
955     }
956 dl 1.119 else
957     c = getState();
958 dl 1.99 }
959 dl 1.119 return r == null ? null : r.val;
960 dl 1.99 }
961    
962     /**
963 dl 1.119 * Finds or adds a node.
964     * @return null if added
965 dl 1.6 */
966 dl 1.128 @SuppressWarnings("unchecked") final TreeNode putTreeNode
967     (int h, Object k, Object v) {
968 dl 1.119 Class<?> c = k.getClass();
969     TreeNode pp = root, p = null;
970     int dir = 0;
971     while (pp != null) { // find existing node or leaf to insert at
972     int ph; Object pk; Class<?> pc;
973     p = pp;
974     if ((ph = p.hash) == h) {
975     if ((pk = p.key) == k || k.equals(pk))
976     return p;
977     if (c != (pc = pk.getClass()) ||
978     !(k instanceof Comparable) ||
979     (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
980     dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
981     TreeNode r = null, s = null, pl, pr;
982     if (dir >= 0) {
983     if ((pl = p.left) != null && h <= pl.hash)
984     s = pl;
985 dl 1.99 }
986 dl 1.119 else if ((pr = p.right) != null && h >= pr.hash)
987     s = pr;
988     if (s != null && (r = getTreeNode(h, k, s)) != null)
989     return r;
990 dl 1.99 }
991     }
992 dl 1.119 else
993     dir = (h < ph) ? -1 : 1;
994     pp = (dir > 0) ? p.right : p.left;
995 dl 1.99 }
996    
997 dl 1.119 TreeNode f = first;
998     TreeNode x = first = new TreeNode(h, k, v, f, p);
999     if (p == null)
1000     root = x;
1001     else { // attach and rebalance; adapted from CLR
1002     TreeNode xp, xpp;
1003     if (f != null)
1004     f.prev = x;
1005     if (dir <= 0)
1006     p.left = x;
1007     else
1008     p.right = x;
1009     x.red = true;
1010     while (x != null && (xp = x.parent) != null && xp.red &&
1011     (xpp = xp.parent) != null) {
1012     TreeNode xppl = xpp.left;
1013     if (xp == xppl) {
1014     TreeNode y = xpp.right;
1015     if (y != null && y.red) {
1016     y.red = false;
1017     xp.red = false;
1018     xpp.red = true;
1019     x = xpp;
1020     }
1021     else {
1022     if (x == xp.right) {
1023     rotateLeft(x = xp);
1024     xpp = (xp = x.parent) == null ? null : xp.parent;
1025     }
1026     if (xp != null) {
1027     xp.red = false;
1028     if (xpp != null) {
1029     xpp.red = true;
1030     rotateRight(xpp);
1031     }
1032     }
1033     }
1034     }
1035     else {
1036     TreeNode y = xppl;
1037     if (y != null && y.red) {
1038     y.red = false;
1039     xp.red = false;
1040     xpp.red = true;
1041     x = xpp;
1042     }
1043     else {
1044     if (x == xp.left) {
1045     rotateRight(x = xp);
1046     xpp = (xp = x.parent) == null ? null : xp.parent;
1047     }
1048     if (xp != null) {
1049     xp.red = false;
1050     if (xpp != null) {
1051     xpp.red = true;
1052     rotateLeft(xpp);
1053     }
1054     }
1055 dl 1.99 }
1056     }
1057     }
1058 dl 1.119 TreeNode r = root;
1059     if (r != null && r.red)
1060     r.red = false;
1061 dl 1.99 }
1062 dl 1.119 return null;
1063 dl 1.99 }
1064 dl 1.45
1065 dl 1.119 /**
1066     * Removes the given node, that must be present before this
1067     * call. This is messier than typical red-black deletion code
1068     * because we cannot swap the contents of an interior node
1069     * with a leaf successor that is pinned by "next" pointers
1070     * that are accessible independently of lock. So instead we
1071     * swap the tree linkages.
1072     */
1073     final void deleteTreeNode(TreeNode p) {
1074     TreeNode next = (TreeNode)p.next; // unlink traversal pointers
1075     TreeNode pred = p.prev;
1076     if (pred == null)
1077     first = next;
1078     else
1079     pred.next = next;
1080     if (next != null)
1081     next.prev = pred;
1082     TreeNode replacement;
1083     TreeNode pl = p.left;
1084     TreeNode pr = p.right;
1085     if (pl != null && pr != null) {
1086     TreeNode s = pr, sl;
1087     while ((sl = s.left) != null) // find successor
1088     s = sl;
1089     boolean c = s.red; s.red = p.red; p.red = c; // swap colors
1090     TreeNode sr = s.right;
1091     TreeNode pp = p.parent;
1092     if (s == pr) { // p was s's direct parent
1093     p.parent = s;
1094     s.right = p;
1095     }
1096     else {
1097     TreeNode sp = s.parent;
1098     if ((p.parent = sp) != null) {
1099     if (s == sp.left)
1100     sp.left = p;
1101     else
1102     sp.right = p;
1103 dl 1.45 }
1104 dl 1.119 if ((s.right = pr) != null)
1105     pr.parent = s;
1106 dl 1.4 }
1107 dl 1.119 p.left = null;
1108     if ((p.right = sr) != null)
1109     sr.parent = p;
1110     if ((s.left = pl) != null)
1111     pl.parent = s;
1112     if ((s.parent = pp) == null)
1113     root = s;
1114     else if (p == pp.left)
1115     pp.left = s;
1116     else
1117     pp.right = s;
1118     replacement = sr;
1119     }
1120     else
1121     replacement = (pl != null) ? pl : pr;
1122     TreeNode pp = p.parent;
1123     if (replacement == null) {
1124     if (pp == null) {
1125     root = null;
1126     return;
1127     }
1128     replacement = p;
1129 dl 1.99 }
1130 dl 1.119 else {
1131     replacement.parent = pp;
1132     if (pp == null)
1133     root = replacement;
1134     else if (p == pp.left)
1135     pp.left = replacement;
1136     else
1137     pp.right = replacement;
1138     p.left = p.right = p.parent = null;
1139 dl 1.4 }
1140 dl 1.119 if (!p.red) { // rebalance, from CLR
1141     TreeNode x = replacement;
1142     while (x != null) {
1143     TreeNode xp, xpl;
1144     if (x.red || (xp = x.parent) == null) {
1145     x.red = false;
1146 dl 1.99 break;
1147 dl 1.119 }
1148     if (x == (xpl = xp.left)) {
1149     TreeNode sib = xp.right;
1150     if (sib != null && sib.red) {
1151     sib.red = false;
1152     xp.red = true;
1153     rotateLeft(xp);
1154     sib = (xp = x.parent) == null ? null : xp.right;
1155     }
1156     if (sib == null)
1157     x = xp;
1158     else {
1159     TreeNode sl = sib.left, sr = sib.right;
1160     if ((sr == null || !sr.red) &&
1161     (sl == null || !sl.red)) {
1162     sib.red = true;
1163     x = xp;
1164     }
1165     else {
1166     if (sr == null || !sr.red) {
1167     if (sl != null)
1168     sl.red = false;
1169     sib.red = true;
1170     rotateRight(sib);
1171     sib = (xp = x.parent) == null ? null : xp.right;
1172     }
1173     if (sib != null) {
1174     sib.red = (xp == null) ? false : xp.red;
1175     if ((sr = sib.right) != null)
1176     sr.red = false;
1177     }
1178     if (xp != null) {
1179     xp.red = false;
1180     rotateLeft(xp);
1181     }
1182     x = root;
1183     }
1184     }
1185     }
1186     else { // symmetric
1187     TreeNode sib = xpl;
1188     if (sib != null && sib.red) {
1189     sib.red = false;
1190     xp.red = true;
1191     rotateRight(xp);
1192     sib = (xp = x.parent) == null ? null : xp.left;
1193     }
1194     if (sib == null)
1195     x = xp;
1196     else {
1197     TreeNode sl = sib.left, sr = sib.right;
1198     if ((sl == null || !sl.red) &&
1199     (sr == null || !sr.red)) {
1200     sib.red = true;
1201     x = xp;
1202     }
1203     else {
1204     if (sl == null || !sl.red) {
1205     if (sr != null)
1206     sr.red = false;
1207     sib.red = true;
1208     rotateLeft(sib);
1209     sib = (xp = x.parent) == null ? null : xp.left;
1210     }
1211     if (sib != null) {
1212     sib.red = (xp == null) ? false : xp.red;
1213     if ((sl = sib.left) != null)
1214     sl.red = false;
1215     }
1216     if (xp != null) {
1217     xp.red = false;
1218     rotateRight(xp);
1219     }
1220     x = root;
1221     }
1222     }
1223     }
1224 dl 1.45 }
1225 dl 1.4 }
1226 dl 1.119 if (p == replacement && (pp = p.parent) != null) {
1227     if (p == pp.left) // detach pointers
1228     pp.left = null;
1229     else if (p == pp.right)
1230     pp.right = null;
1231     p.parent = null;
1232     }
1233 dl 1.4 }
1234 tim 1.1 }
1235    
1236 dl 1.119 /* ---------------- Collision reduction methods -------------- */
1237 tim 1.1
1238 dl 1.99 /**
1239 dl 1.119 * Spreads higher bits to lower, and also forces top 2 bits to 0.
1240     * Because the table uses power-of-two masking, sets of hashes
1241     * that vary only in bits above the current mask will always
1242     * collide. (Among known examples are sets of Float keys holding
1243     * consecutive whole numbers in small tables.) To counter this,
1244     * we apply a transform that spreads the impact of higher bits
1245     * downward. There is a tradeoff between speed, utility, and
1246     * quality of bit-spreading. Because many common sets of hashes
1247     * are already reasonably distributed across bits (so don't benefit
1248     * from spreading), and because we use trees to handle large sets
1249     * of collisions in bins, we don't need excessively high quality.
1250     */
1251     private static final int spread(int h) {
1252     h ^= (h >>> 18) ^ (h >>> 12);
1253     return (h ^ (h >>> 10)) & HASH_BITS;
1254 dl 1.99 }
1255    
1256     /**
1257 dl 1.119 * Replaces a list bin with a tree bin. Call only when locked.
1258     * Fails to replace if the given key is non-comparable or table
1259     * is, or needs, resizing.
1260     */
1261     private final void replaceWithTreeBin(Node[] tab, int index, Object key) {
1262     if ((key instanceof Comparable) &&
1263     (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) {
1264     TreeBin t = new TreeBin();
1265     for (Node e = tabAt(tab, index); e != null; e = e.next)
1266     t.putTreeNode(e.hash & HASH_BITS, e.key, e.val);
1267     setTabAt(tab, index, new Node(MOVED, t, null, null));
1268     }
1269 dl 1.99 }
1270 tim 1.11
1271 dl 1.119 /* ---------------- Internal access and update methods -------------- */
1272    
1273     /** Implementation for get and containsKey */
1274     private final Object internalGet(Object k) {
1275     int h = spread(k.hashCode());
1276     retry: for (Node[] tab = table; tab != null;) {
1277     Node e, p; Object ek, ev; int eh; // locals to read fields once
1278     for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) {
1279     if ((eh = e.hash) == MOVED) {
1280     if ((ek = e.key) instanceof TreeBin) // search TreeBin
1281     return ((TreeBin)ek).getValue(h, k);
1282     else { // restart with new table
1283     tab = (Node[])ek;
1284     continue retry;
1285     }
1286     }
1287     else if ((eh & HASH_BITS) == h && (ev = e.val) != null &&
1288     ((ek = e.key) == k || k.equals(ek)))
1289     return ev;
1290     }
1291     break;
1292     }
1293     return null;
1294 tim 1.1 }
1295    
1296     /**
1297 dl 1.119 * Implementation for the four public remove/replace methods:
1298     * Replaces node value with v, conditional upon match of cv if
1299     * non-null. If resulting value is null, delete.
1300     */
1301     private final Object internalReplace(Object k, Object v, Object cv) {
1302     int h = spread(k.hashCode());
1303     Object oldVal = null;
1304     for (Node[] tab = table;;) {
1305     Node f; int i, fh; Object fk;
1306     if (tab == null ||
1307     (f = tabAt(tab, i = (tab.length - 1) & h)) == null)
1308     break;
1309     else if ((fh = f.hash) == MOVED) {
1310     if ((fk = f.key) instanceof TreeBin) {
1311     TreeBin t = (TreeBin)fk;
1312     boolean validated = false;
1313     boolean deleted = false;
1314     t.acquire(0);
1315     try {
1316     if (tabAt(tab, i) == f) {
1317     validated = true;
1318     TreeNode p = t.getTreeNode(h, k, t.root);
1319     if (p != null) {
1320     Object pv = p.val;
1321     if (cv == null || cv == pv || cv.equals(pv)) {
1322     oldVal = pv;
1323     if ((p.val = v) == null) {
1324     deleted = true;
1325     t.deleteTreeNode(p);
1326     }
1327     }
1328     }
1329     }
1330     } finally {
1331     t.release(0);
1332     }
1333     if (validated) {
1334     if (deleted)
1335     counter.add(-1L);
1336     break;
1337     }
1338     }
1339     else
1340     tab = (Node[])fk;
1341     }
1342     else if ((fh & HASH_BITS) != h && f.next == null) // precheck
1343     break; // rules out possible existence
1344     else if ((fh & LOCKED) != 0) {
1345     checkForResize(); // try resizing if can't get lock
1346     f.tryAwaitLock(tab, i);
1347     }
1348     else if (f.casHash(fh, fh | LOCKED)) {
1349     boolean validated = false;
1350     boolean deleted = false;
1351     try {
1352     if (tabAt(tab, i) == f) {
1353     validated = true;
1354     for (Node e = f, pred = null;;) {
1355     Object ek, ev;
1356     if ((e.hash & HASH_BITS) == h &&
1357     ((ev = e.val) != null) &&
1358     ((ek = e.key) == k || k.equals(ek))) {
1359     if (cv == null || cv == ev || cv.equals(ev)) {
1360     oldVal = ev;
1361     if ((e.val = v) == null) {
1362     deleted = true;
1363     Node en = e.next;
1364     if (pred != null)
1365     pred.next = en;
1366     else
1367     setTabAt(tab, i, en);
1368     }
1369     }
1370     break;
1371     }
1372     pred = e;
1373     if ((e = e.next) == null)
1374     break;
1375     }
1376     }
1377     } finally {
1378     if (!f.casHash(fh | LOCKED, fh)) {
1379     f.hash = fh;
1380     synchronized (f) { f.notifyAll(); };
1381     }
1382     }
1383     if (validated) {
1384     if (deleted)
1385     counter.add(-1L);
1386     break;
1387     }
1388     }
1389     }
1390     return oldVal;
1391 dl 1.55 }
1392    
1393 dl 1.119 /*
1394 dl 1.126 * Internal versions of the six insertion methods, each a
1395 dl 1.119 * little more complicated than the last. All have
1396     * the same basic structure as the first (internalPut):
1397     * 1. If table uninitialized, create
1398     * 2. If bin empty, try to CAS new node
1399     * 3. If bin stale, use new table
1400     * 4. If bin converted to TreeBin, validate and relay to TreeBin methods
1401     * 5. Lock and validate; if valid, scan and add or update
1402 tim 1.1 *
1403 dl 1.119 * The others interweave other checks and/or alternative actions:
1404     * * Plain put checks for and performs resize after insertion.
1405     * * putIfAbsent prescans for mapping without lock (and fails to add
1406     * if present), which also makes pre-emptive resize checks worthwhile.
1407     * * computeIfAbsent extends form used in putIfAbsent with additional
1408     * mechanics to deal with calls, potential exceptions, and null
1409     * returns from the function call.
1410     * * compute uses the same function-call mechanics, but without
1411     * the prescans
1412 dl 1.126 * * merge acts as putIfAbsent in the absent case, but invokes the
1413     * update function if present
1414 dl 1.119 * * putAll attempts to pre-allocate enough table space
1415     * and more lazily performs count updates and checks.
1416 jsr166 1.76 *
1417 dl 1.119 * Someday when details settle down a bit more, it might be worth
1418     * some factoring to reduce sprawl.
1419 tim 1.1 */
1420    
1421 dl 1.119 /** Implementation for put */
1422     private final Object internalPut(Object k, Object v) {
1423     int h = spread(k.hashCode());
1424     int count = 0;
1425     for (Node[] tab = table;;) {
1426     int i; Node f; int fh; Object fk;
1427     if (tab == null)
1428     tab = initTable();
1429     else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1430     if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1431     break; // no lock when adding to empty bin
1432 dl 1.99 }
1433 dl 1.119 else if ((fh = f.hash) == MOVED) {
1434     if ((fk = f.key) instanceof TreeBin) {
1435     TreeBin t = (TreeBin)fk;
1436     Object oldVal = null;
1437     t.acquire(0);
1438     try {
1439     if (tabAt(tab, i) == f) {
1440     count = 2;
1441     TreeNode p = t.putTreeNode(h, k, v);
1442     if (p != null) {
1443     oldVal = p.val;
1444     p.val = v;
1445     }
1446     }
1447     } finally {
1448     t.release(0);
1449     }
1450     if (count != 0) {
1451     if (oldVal != null)
1452     return oldVal;
1453     break;
1454     }
1455     }
1456     else
1457     tab = (Node[])fk;
1458     }
1459     else if ((fh & LOCKED) != 0) {
1460     checkForResize();
1461     f.tryAwaitLock(tab, i);
1462     }
1463     else if (f.casHash(fh, fh | LOCKED)) {
1464     Object oldVal = null;
1465     try { // needed in case equals() throws
1466     if (tabAt(tab, i) == f) {
1467     count = 1;
1468     for (Node e = f;; ++count) {
1469     Object ek, ev;
1470     if ((e.hash & HASH_BITS) == h &&
1471     (ev = e.val) != null &&
1472     ((ek = e.key) == k || k.equals(ek))) {
1473     oldVal = ev;
1474     e.val = v;
1475     break;
1476     }
1477     Node last = e;
1478     if ((e = e.next) == null) {
1479     last.next = new Node(h, k, v, null);
1480     if (count >= TREE_THRESHOLD)
1481     replaceWithTreeBin(tab, i, k);
1482     break;
1483     }
1484     }
1485     }
1486     } finally { // unlock and signal if needed
1487     if (!f.casHash(fh | LOCKED, fh)) {
1488     f.hash = fh;
1489     synchronized (f) { f.notifyAll(); };
1490     }
1491     }
1492     if (count != 0) {
1493     if (oldVal != null)
1494     return oldVal;
1495     if (tab.length <= 64)
1496     count = 2;
1497     break;
1498 dl 1.99 }
1499 dl 1.21 }
1500     }
1501 dl 1.119 counter.add(1L);
1502     if (count > 1)
1503     checkForResize();
1504     return null;
1505 tim 1.1 }
1506    
1507 dl 1.119 /** Implementation for putIfAbsent */
1508     private final Object internalPutIfAbsent(Object k, Object v) {
1509     int h = spread(k.hashCode());
1510     int count = 0;
1511     for (Node[] tab = table;;) {
1512     int i; Node f; int fh; Object fk, fv;
1513     if (tab == null)
1514     tab = initTable();
1515     else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1516     if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1517     break;
1518     }
1519     else if ((fh = f.hash) == MOVED) {
1520     if ((fk = f.key) instanceof TreeBin) {
1521     TreeBin t = (TreeBin)fk;
1522     Object oldVal = null;
1523     t.acquire(0);
1524     try {
1525     if (tabAt(tab, i) == f) {
1526     count = 2;
1527     TreeNode p = t.putTreeNode(h, k, v);
1528     if (p != null)
1529     oldVal = p.val;
1530     }
1531     } finally {
1532     t.release(0);
1533     }
1534     if (count != 0) {
1535     if (oldVal != null)
1536     return oldVal;
1537     break;
1538     }
1539     }
1540     else
1541     tab = (Node[])fk;
1542     }
1543     else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1544     ((fk = f.key) == k || k.equals(fk)))
1545     return fv;
1546     else {
1547     Node g = f.next;
1548     if (g != null) { // at least 2 nodes -- search and maybe resize
1549     for (Node e = g;;) {
1550     Object ek, ev;
1551     if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1552     ((ek = e.key) == k || k.equals(ek)))
1553     return ev;
1554     if ((e = e.next) == null) {
1555     checkForResize();
1556     break;
1557     }
1558     }
1559     }
1560     if (((fh = f.hash) & LOCKED) != 0) {
1561     checkForResize();
1562     f.tryAwaitLock(tab, i);
1563     }
1564     else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1565     Object oldVal = null;
1566     try {
1567     if (tabAt(tab, i) == f) {
1568     count = 1;
1569     for (Node e = f;; ++count) {
1570     Object ek, ev;
1571     if ((e.hash & HASH_BITS) == h &&
1572     (ev = e.val) != null &&
1573     ((ek = e.key) == k || k.equals(ek))) {
1574     oldVal = ev;
1575     break;
1576     }
1577     Node last = e;
1578     if ((e = e.next) == null) {
1579     last.next = new Node(h, k, v, null);
1580     if (count >= TREE_THRESHOLD)
1581     replaceWithTreeBin(tab, i, k);
1582     break;
1583     }
1584     }
1585     }
1586     } finally {
1587     if (!f.casHash(fh | LOCKED, fh)) {
1588     f.hash = fh;
1589     synchronized (f) { f.notifyAll(); };
1590     }
1591     }
1592     if (count != 0) {
1593     if (oldVal != null)
1594     return oldVal;
1595     if (tab.length <= 64)
1596     count = 2;
1597     break;
1598     }
1599     }
1600     }
1601 dl 1.45 }
1602 dl 1.119 counter.add(1L);
1603     if (count > 1)
1604     checkForResize();
1605     return null;
1606 dl 1.21 }
1607    
1608 dl 1.119 /** Implementation for computeIfAbsent */
1609     private final Object internalComputeIfAbsent(K k,
1610     Fun<? super K, ?> mf) {
1611     int h = spread(k.hashCode());
1612     Object val = null;
1613     int count = 0;
1614     for (Node[] tab = table;;) {
1615     Node f; int i, fh; Object fk, fv;
1616     if (tab == null)
1617     tab = initTable();
1618     else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1619     Node node = new Node(fh = h | LOCKED, k, null, null);
1620     if (casTabAt(tab, i, null, node)) {
1621     count = 1;
1622     try {
1623     if ((val = mf.apply(k)) != null)
1624     node.val = val;
1625     } finally {
1626     if (val == null)
1627     setTabAt(tab, i, null);
1628     if (!node.casHash(fh, h)) {
1629     node.hash = h;
1630     synchronized (node) { node.notifyAll(); };
1631     }
1632     }
1633     }
1634     if (count != 0)
1635     break;
1636     }
1637     else if ((fh = f.hash) == MOVED) {
1638     if ((fk = f.key) instanceof TreeBin) {
1639     TreeBin t = (TreeBin)fk;
1640     boolean added = false;
1641     t.acquire(0);
1642     try {
1643     if (tabAt(tab, i) == f) {
1644     count = 1;
1645     TreeNode p = t.getTreeNode(h, k, t.root);
1646     if (p != null)
1647     val = p.val;
1648     else if ((val = mf.apply(k)) != null) {
1649     added = true;
1650     count = 2;
1651     t.putTreeNode(h, k, val);
1652     }
1653     }
1654     } finally {
1655     t.release(0);
1656     }
1657     if (count != 0) {
1658     if (!added)
1659     return val;
1660     break;
1661     }
1662     }
1663     else
1664     tab = (Node[])fk;
1665     }
1666     else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1667     ((fk = f.key) == k || k.equals(fk)))
1668     return fv;
1669     else {
1670     Node g = f.next;
1671     if (g != null) {
1672     for (Node e = g;;) {
1673     Object ek, ev;
1674     if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1675     ((ek = e.key) == k || k.equals(ek)))
1676     return ev;
1677     if ((e = e.next) == null) {
1678     checkForResize();
1679     break;
1680     }
1681     }
1682     }
1683     if (((fh = f.hash) & LOCKED) != 0) {
1684     checkForResize();
1685     f.tryAwaitLock(tab, i);
1686     }
1687     else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1688     boolean added = false;
1689     try {
1690     if (tabAt(tab, i) == f) {
1691     count = 1;
1692     for (Node e = f;; ++count) {
1693     Object ek, ev;
1694     if ((e.hash & HASH_BITS) == h &&
1695     (ev = e.val) != null &&
1696     ((ek = e.key) == k || k.equals(ek))) {
1697     val = ev;
1698     break;
1699     }
1700     Node last = e;
1701     if ((e = e.next) == null) {
1702     if ((val = mf.apply(k)) != null) {
1703     added = true;
1704     last.next = new Node(h, k, val, null);
1705     if (count >= TREE_THRESHOLD)
1706     replaceWithTreeBin(tab, i, k);
1707     }
1708     break;
1709     }
1710     }
1711     }
1712     } finally {
1713     if (!f.casHash(fh | LOCKED, fh)) {
1714     f.hash = fh;
1715     synchronized (f) { f.notifyAll(); };
1716     }
1717     }
1718     if (count != 0) {
1719     if (!added)
1720     return val;
1721     if (tab.length <= 64)
1722     count = 2;
1723     break;
1724     }
1725     }
1726 dl 1.105 }
1727 dl 1.99 }
1728 dl 1.119 if (val != null) {
1729     counter.add(1L);
1730     if (count > 1)
1731     checkForResize();
1732     }
1733     return val;
1734 tim 1.1 }
1735    
1736 dl 1.119 /** Implementation for compute */
1737 dl 1.128 @SuppressWarnings("unchecked") private final Object internalCompute
1738     (K k, boolean onlyIfPresent, BiFun<? super K, ? super V, ? extends V> mf) {
1739 dl 1.119 int h = spread(k.hashCode());
1740     Object val = null;
1741     int delta = 0;
1742     int count = 0;
1743     for (Node[] tab = table;;) {
1744     Node f; int i, fh; Object fk;
1745     if (tab == null)
1746     tab = initTable();
1747     else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1748     if (onlyIfPresent)
1749     break;
1750     Node node = new Node(fh = h | LOCKED, k, null, null);
1751     if (casTabAt(tab, i, null, node)) {
1752     try {
1753     count = 1;
1754     if ((val = mf.apply(k, null)) != null) {
1755     node.val = val;
1756     delta = 1;
1757     }
1758     } finally {
1759     if (delta == 0)
1760     setTabAt(tab, i, null);
1761     if (!node.casHash(fh, h)) {
1762     node.hash = h;
1763     synchronized (node) { node.notifyAll(); };
1764     }
1765     }
1766     }
1767     if (count != 0)
1768     break;
1769     }
1770     else if ((fh = f.hash) == MOVED) {
1771     if ((fk = f.key) instanceof TreeBin) {
1772     TreeBin t = (TreeBin)fk;
1773     t.acquire(0);
1774     try {
1775     if (tabAt(tab, i) == f) {
1776     count = 1;
1777     TreeNode p = t.getTreeNode(h, k, t.root);
1778     Object pv = (p == null) ? null : p.val;
1779     if ((val = mf.apply(k, (V)pv)) != null) {
1780     if (p != null)
1781     p.val = val;
1782     else {
1783     count = 2;
1784     delta = 1;
1785     t.putTreeNode(h, k, val);
1786     }
1787     }
1788     else if (p != null) {
1789     delta = -1;
1790     t.deleteTreeNode(p);
1791     }
1792     }
1793     } finally {
1794     t.release(0);
1795     }
1796     if (count != 0)
1797     break;
1798     }
1799     else
1800     tab = (Node[])fk;
1801     }
1802     else if ((fh & LOCKED) != 0) {
1803     checkForResize();
1804     f.tryAwaitLock(tab, i);
1805     }
1806     else if (f.casHash(fh, fh | LOCKED)) {
1807     try {
1808     if (tabAt(tab, i) == f) {
1809     count = 1;
1810     for (Node e = f, pred = null;; ++count) {
1811     Object ek, ev;
1812     if ((e.hash & HASH_BITS) == h &&
1813     (ev = e.val) != null &&
1814     ((ek = e.key) == k || k.equals(ek))) {
1815     val = mf.apply(k, (V)ev);
1816     if (val != null)
1817     e.val = val;
1818     else {
1819     delta = -1;
1820     Node en = e.next;
1821     if (pred != null)
1822     pred.next = en;
1823     else
1824     setTabAt(tab, i, en);
1825     }
1826     break;
1827     }
1828     pred = e;
1829     if ((e = e.next) == null) {
1830     if (!onlyIfPresent && (val = mf.apply(k, null)) != null) {
1831     pred.next = new Node(h, k, val, null);
1832     delta = 1;
1833     if (count >= TREE_THRESHOLD)
1834     replaceWithTreeBin(tab, i, k);
1835     }
1836     break;
1837     }
1838     }
1839     }
1840     } finally {
1841     if (!f.casHash(fh | LOCKED, fh)) {
1842     f.hash = fh;
1843     synchronized (f) { f.notifyAll(); };
1844     }
1845     }
1846     if (count != 0) {
1847     if (tab.length <= 64)
1848     count = 2;
1849     break;
1850     }
1851     }
1852     }
1853     if (delta != 0) {
1854     counter.add((long)delta);
1855     if (count > 1)
1856     checkForResize();
1857     }
1858     return val;
1859     }
1860    
1861 dl 1.126 /** Implementation for merge */
1862 dl 1.128 @SuppressWarnings("unchecked") private final Object internalMerge
1863     (K k, V v, BiFun<? super V, ? super V, ? extends V> mf) {
1864 dl 1.119 int h = spread(k.hashCode());
1865     Object val = null;
1866     int delta = 0;
1867     int count = 0;
1868     for (Node[] tab = table;;) {
1869     int i; Node f; int fh; Object fk, fv;
1870     if (tab == null)
1871     tab = initTable();
1872     else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1873     if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1874     delta = 1;
1875     val = v;
1876     break;
1877     }
1878     }
1879     else if ((fh = f.hash) == MOVED) {
1880     if ((fk = f.key) instanceof TreeBin) {
1881     TreeBin t = (TreeBin)fk;
1882     t.acquire(0);
1883     try {
1884     if (tabAt(tab, i) == f) {
1885     count = 1;
1886     TreeNode p = t.getTreeNode(h, k, t.root);
1887     val = (p == null) ? v : mf.apply((V)p.val, v);
1888     if (val != null) {
1889     if (p != null)
1890     p.val = val;
1891     else {
1892     count = 2;
1893     delta = 1;
1894     t.putTreeNode(h, k, val);
1895     }
1896     }
1897     else if (p != null) {
1898     delta = -1;
1899     t.deleteTreeNode(p);
1900     }
1901     }
1902     } finally {
1903     t.release(0);
1904     }
1905     if (count != 0)
1906     break;
1907     }
1908     else
1909     tab = (Node[])fk;
1910     }
1911     else if ((fh & LOCKED) != 0) {
1912     checkForResize();
1913     f.tryAwaitLock(tab, i);
1914     }
1915     else if (f.casHash(fh, fh | LOCKED)) {
1916     try {
1917     if (tabAt(tab, i) == f) {
1918     count = 1;
1919     for (Node e = f, pred = null;; ++count) {
1920     Object ek, ev;
1921     if ((e.hash & HASH_BITS) == h &&
1922     (ev = e.val) != null &&
1923     ((ek = e.key) == k || k.equals(ek))) {
1924     val = mf.apply((V)ev, v);
1925     if (val != null)
1926     e.val = val;
1927     else {
1928     delta = -1;
1929     Node en = e.next;
1930     if (pred != null)
1931     pred.next = en;
1932     else
1933     setTabAt(tab, i, en);
1934     }
1935     break;
1936     }
1937     pred = e;
1938     if ((e = e.next) == null) {
1939     val = v;
1940     pred.next = new Node(h, k, val, null);
1941     delta = 1;
1942     if (count >= TREE_THRESHOLD)
1943     replaceWithTreeBin(tab, i, k);
1944     break;
1945     }
1946     }
1947     }
1948     } finally {
1949     if (!f.casHash(fh | LOCKED, fh)) {
1950     f.hash = fh;
1951     synchronized (f) { f.notifyAll(); };
1952     }
1953     }
1954     if (count != 0) {
1955     if (tab.length <= 64)
1956     count = 2;
1957     break;
1958     }
1959 dl 1.105 }
1960 dl 1.99 }
1961 dl 1.119 if (delta != 0) {
1962     counter.add((long)delta);
1963     if (count > 1)
1964     checkForResize();
1965     }
1966     return val;
1967     }
1968    
1969     /** Implementation for putAll */
1970     private final void internalPutAll(Map<?, ?> m) {
1971     tryPresize(m.size());
1972     long delta = 0L; // number of uncommitted additions
1973     boolean npe = false; // to throw exception on exit for nulls
1974     try { // to clean up counts on other exceptions
1975     for (Map.Entry<?, ?> entry : m.entrySet()) {
1976     Object k, v;
1977     if (entry == null || (k = entry.getKey()) == null ||
1978     (v = entry.getValue()) == null) {
1979     npe = true;
1980     break;
1981     }
1982     int h = spread(k.hashCode());
1983     for (Node[] tab = table;;) {
1984     int i; Node f; int fh; Object fk;
1985     if (tab == null)
1986     tab = initTable();
1987     else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
1988     if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1989     ++delta;
1990     break;
1991     }
1992     }
1993     else if ((fh = f.hash) == MOVED) {
1994     if ((fk = f.key) instanceof TreeBin) {
1995     TreeBin t = (TreeBin)fk;
1996     boolean validated = false;
1997     t.acquire(0);
1998     try {
1999     if (tabAt(tab, i) == f) {
2000     validated = true;
2001     TreeNode p = t.getTreeNode(h, k, t.root);
2002     if (p != null)
2003     p.val = v;
2004     else {
2005     t.putTreeNode(h, k, v);
2006     ++delta;
2007     }
2008     }
2009     } finally {
2010     t.release(0);
2011     }
2012     if (validated)
2013     break;
2014     }
2015     else
2016     tab = (Node[])fk;
2017     }
2018     else if ((fh & LOCKED) != 0) {
2019     counter.add(delta);
2020     delta = 0L;
2021     checkForResize();
2022     f.tryAwaitLock(tab, i);
2023     }
2024     else if (f.casHash(fh, fh | LOCKED)) {
2025     int count = 0;
2026     try {
2027     if (tabAt(tab, i) == f) {
2028     count = 1;
2029     for (Node e = f;; ++count) {
2030     Object ek, ev;
2031     if ((e.hash & HASH_BITS) == h &&
2032     (ev = e.val) != null &&
2033     ((ek = e.key) == k || k.equals(ek))) {
2034     e.val = v;
2035     break;
2036     }
2037     Node last = e;
2038     if ((e = e.next) == null) {
2039     ++delta;
2040     last.next = new Node(h, k, v, null);
2041     if (count >= TREE_THRESHOLD)
2042     replaceWithTreeBin(tab, i, k);
2043     break;
2044     }
2045     }
2046     }
2047     } finally {
2048     if (!f.casHash(fh | LOCKED, fh)) {
2049     f.hash = fh;
2050     synchronized (f) { f.notifyAll(); };
2051     }
2052     }
2053     if (count != 0) {
2054     if (count > 1) {
2055     counter.add(delta);
2056     delta = 0L;
2057     checkForResize();
2058 dl 1.99 }
2059 dl 1.119 break;
2060 dl 1.99 }
2061 dl 1.21 }
2062     }
2063 dl 1.45 }
2064     } finally {
2065 dl 1.119 if (delta != 0)
2066     counter.add(delta);
2067 dl 1.45 }
2068 dl 1.119 if (npe)
2069     throw new NullPointerException();
2070 tim 1.1 }
2071 dl 1.19
2072 dl 1.119 /* ---------------- Table Initialization and Resizing -------------- */
2073    
2074 tim 1.1 /**
2075 dl 1.119 * Returns a power of two table size for the given desired capacity.
2076     * See Hacker's Delight, sec 3.2
2077 tim 1.1 */
2078 dl 1.119 private static final int tableSizeFor(int c) {
2079     int n = c - 1;
2080     n |= n >>> 1;
2081     n |= n >>> 2;
2082     n |= n >>> 4;
2083     n |= n >>> 8;
2084     n |= n >>> 16;
2085     return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
2086 tim 1.1 }
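As a quick illustration of the bit-smearing above, the following standalone sketch (class name invented; the MAXIMUM_CAPACITY clamp is omitted) prints the rounded-up power-of-two sizes for a few sample capacities.

public class TableSizeForDemo {
    static int tableSizeFor(int c) {       // same smearing as above, no clamp
        int n = c - 1;
        n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
        return (n < 0) ? 1 : n + 1;
    }
    public static void main(String[] args) {
        for (int c : new int[] {1, 5, 17, 1000})
            System.out.println(c + " -> " + tableSizeFor(c)); // 1, 8, 32, 1024
    }
}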
2087    
2088     /**
2089 dl 1.119 * Initializes table, using the size recorded in sizeCtl.
2090     */
2091     private final Node[] initTable() {
2092     Node[] tab; int sc;
2093     while ((tab = table) == null) {
2094     if ((sc = sizeCtl) < 0)
2095     Thread.yield(); // lost initialization race; just spin
2096     else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2097     try {
2098     if ((tab = table) == null) {
2099     int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2100     tab = table = new Node[n];
2101     sc = n - (n >>> 2);
2102     }
2103     } finally {
2104     sizeCtl = sc;
2105     }
2106     break;
2107     }
2108     }
2109     return tab;
2110 dl 1.4 }
2111    
2112     /**
2113 dl 1.119 * If table is too small and not already resizing, creates next
2114     * table and transfers bins. Rechecks occupancy after a transfer
2115     * to see if another resize is already needed because resizings
2116     * are lagging additions.
2117 dl 1.51 */
2118 dl 1.119 private final void checkForResize() {
2119     Node[] tab; int n, sc;
2120     while ((tab = table) != null &&
2121     (n = tab.length) < MAXIMUM_CAPACITY &&
2122     (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc &&
2123     UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2124     try {
2125     if (tab == table) {
2126     table = rebuild(tab);
2127     sc = (n << 1) - (n >>> 1);
2128     }
2129     } finally {
2130     sizeCtl = sc;
2131     }
2132     }
2133 dl 1.4 }
2134    
2135     /**
2136 dl 1.119 * Tries to presize table to accommodate the given number of elements.
2137 tim 1.1 *
2138 dl 1.119 * @param size number of elements (doesn't need to be perfectly accurate)
2139 tim 1.1 */
2140 dl 1.119 private final void tryPresize(int size) {
2141     int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2142     tableSizeFor(size + (size >>> 1) + 1);
2143     int sc;
2144     while ((sc = sizeCtl) >= 0) {
2145     Node[] tab = table; int n;
2146     if (tab == null || (n = tab.length) == 0) {
2147     n = (sc > c) ? sc : c;
2148     if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2149     try {
2150     if (table == tab) {
2151     table = new Node[n];
2152     sc = n - (n >>> 2);
2153     }
2154     } finally {
2155     sizeCtl = sc;
2156     }
2157     }
2158     }
2159     else if (c <= sc || n >= MAXIMUM_CAPACITY)
2160     break;
2161     else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2162     try {
2163     if (table == tab) {
2164     table = rebuild(tab);
2165     sc = (n << 1) - (n >>> 1);
2166     }
2167     } finally {
2168     sizeCtl = sc;
2169     }
2170     }
2171     }
2172 dl 1.4 }
2173    
2174 dl 1.119 /*
2175     * Moves and/or copies the nodes in each bin to new table. See
2176     * above for explanation.
2177 dl 1.4 *
2178 dl 1.119 * @return the new table
2179 dl 1.4 */
2180 dl 1.119 private static final Node[] rebuild(Node[] tab) {
2181     int n = tab.length;
2182     Node[] nextTab = new Node[n << 1];
2183     Node fwd = new Node(MOVED, nextTab, null, null);
2184     int[] buffer = null; // holds bins to revisit; null until needed
2185     Node rev = null; // reverse forwarder; null until needed
2186     int nbuffered = 0; // the number of bins in buffer list
2187     int bufferIndex = 0; // buffer index of current buffered bin
2188     int bin = n - 1; // current non-buffered bin or -1 if none
2189    
2190     for (int i = bin;;) { // start upwards sweep
2191     int fh; Node f;
2192     if ((f = tabAt(tab, i)) == null) {
2193 dl 1.128 if (bin >= 0) { // Unbuffered; no lock needed (or available)
2194 dl 1.119 if (!casTabAt(tab, i, f, fwd))
2195     continue;
2196     }
2197     else { // transiently use a locked forwarding node
2198     Node g = new Node(MOVED|LOCKED, nextTab, null, null);
2199     if (!casTabAt(tab, i, f, g))
2200     continue;
2201     setTabAt(nextTab, i, null);
2202     setTabAt(nextTab, i + n, null);
2203     setTabAt(tab, i, fwd);
2204     if (!g.casHash(MOVED|LOCKED, MOVED)) {
2205     g.hash = MOVED;
2206     synchronized (g) { g.notifyAll(); }
2207     }
2208     }
2209     }
2210     else if ((fh = f.hash) == MOVED) {
2211     Object fk = f.key;
2212     if (fk instanceof TreeBin) {
2213     TreeBin t = (TreeBin)fk;
2214     boolean validated = false;
2215     t.acquire(0);
2216     try {
2217     if (tabAt(tab, i) == f) {
2218     validated = true;
2219     splitTreeBin(nextTab, i, t);
2220     setTabAt(tab, i, fwd);
2221     }
2222     } finally {
2223     t.release(0);
2224     }
2225     if (!validated)
2226     continue;
2227     }
2228     }
2229     else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) {
2230     boolean validated = false;
2231     try { // split to lo and hi lists; copying as needed
2232     if (tabAt(tab, i) == f) {
2233     validated = true;
2234     splitBin(nextTab, i, f);
2235     setTabAt(tab, i, fwd);
2236     }
2237     } finally {
2238     if (!f.casHash(fh | LOCKED, fh)) {
2239     f.hash = fh;
2240     synchronized (f) { f.notifyAll(); };
2241     }
2242     }
2243     if (!validated)
2244     continue;
2245     }
2246     else {
2247     if (buffer == null) // initialize buffer for revisits
2248     buffer = new int[TRANSFER_BUFFER_SIZE];
2249     if (bin < 0 && bufferIndex > 0) {
2250     int j = buffer[--bufferIndex];
2251     buffer[bufferIndex] = i;
2252     i = j; // swap with another bin
2253     continue;
2254     }
2255     if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) {
2256     f.tryAwaitLock(tab, i);
2257     continue; // no other options -- block
2258     }
2259     if (rev == null) // initialize reverse-forwarder
2260     rev = new Node(MOVED, tab, null, null);
2261     if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0)
2262     continue; // recheck before adding to list
2263     buffer[nbuffered++] = i;
2264     setTabAt(nextTab, i, rev); // install place-holders
2265     setTabAt(nextTab, i + n, rev);
2266     }
2267    
2268     if (bin > 0)
2269     i = --bin;
2270     else if (buffer != null && nbuffered > 0) {
2271     bin = -1;
2272     i = buffer[bufferIndex = --nbuffered];
2273     }
2274     else
2275     return nextTab;
2276     }
2277 dl 1.4 }
2278 tim 1.1
2279 dl 1.4 /**
2280 dl 1.119 * Splits a normal bin with list headed by e into lo and hi parts;
2281     * installs in given table.
2282 dl 1.4 */
2283 dl 1.119 private static void splitBin(Node[] nextTab, int i, Node e) {
2284     int bit = nextTab.length >>> 1; // bit to split on
2285     int runBit = e.hash & bit;
2286     Node lastRun = e, lo = null, hi = null;
2287     for (Node p = e.next; p != null; p = p.next) {
2288     int b = p.hash & bit;
2289     if (b != runBit) {
2290     runBit = b;
2291     lastRun = p;
2292     }
2293     }
2294     if (runBit == 0)
2295     lo = lastRun;
2296     else
2297     hi = lastRun;
2298     for (Node p = e; p != lastRun; p = p.next) {
2299     int ph = p.hash & HASH_BITS;
2300     Object pk = p.key, pv = p.val;
2301     if ((ph & bit) == 0)
2302     lo = new Node(ph, pk, pv, lo);
2303     else
2304     hi = new Node(ph, pk, pv, hi);
2305     }
2306     setTabAt(nextTab, i, lo);
2307     setTabAt(nextTab, i + bit, hi);
2308     }
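The lo/hi partition rule used above can be seen in isolation with a small standalone sketch (example values only): when the table doubles from n to 2n bins, a node in old bin i keeps index i if the bit (hash & n) is zero, and moves to index i + n otherwise.

public class SplitRuleDemo {
    public static void main(String[] args) {
        int n = 16, i = 5;        // old table size and a sample bin index
        int h1 = 37, h2 = 53;     // two hashes that both map to old bin 5
        System.out.println((h1 & (n - 1)) + " " + (h2 & (n - 1)));   // 5 5
        System.out.println(((h1 & n) == 0) ? i : i + n);             // 5
        System.out.println(((h2 & n) == 0) ? i : i + n);             // 21
    }
}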
2309    
2310     /**
2311     * Splits a tree bin into lo and hi parts; installs in given table.
2312     */
2313     private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) {
2314     int bit = nextTab.length >>> 1;
2315     TreeBin lt = new TreeBin();
2316     TreeBin ht = new TreeBin();
2317     int lc = 0, hc = 0;
2318     for (Node e = t.first; e != null; e = e.next) {
2319     int h = e.hash & HASH_BITS;
2320     Object k = e.key, v = e.val;
2321     if ((h & bit) == 0) {
2322     ++lc;
2323     lt.putTreeNode(h, k, v);
2324     }
2325     else {
2326     ++hc;
2327     ht.putTreeNode(h, k, v);
2328     }
2329     }
2330     Node ln, hn; // throw away trees if too small
2331     if (lc <= (TREE_THRESHOLD >>> 1)) {
2332     ln = null;
2333     for (Node p = lt.first; p != null; p = p.next)
2334     ln = new Node(p.hash, p.key, p.val, ln);
2335     }
2336     else
2337     ln = new Node(MOVED, lt, null, null);
2338     setTabAt(nextTab, i, ln);
2339     if (hc <= (TREE_THRESHOLD >>> 1)) {
2340     hn = null;
2341     for (Node p = ht.first; p != null; p = p.next)
2342     hn = new Node(p.hash, p.key, p.val, hn);
2343     }
2344     else
2345     hn = new Node(MOVED, ht, null, null);
2346     setTabAt(nextTab, i + bit, hn);
2347     }
2348    
2349     /**
2350     * Implementation for clear. Steps through each bin, removing all
2351     * nodes.
2352     */
2353     private final void internalClear() {
2354     long delta = 0L; // negative number of deletions
2355     int i = 0;
2356     Node[] tab = table;
2357     while (tab != null && i < tab.length) {
2358     int fh; Object fk;
2359     Node f = tabAt(tab, i);
2360     if (f == null)
2361     ++i;
2362     else if ((fh = f.hash) == MOVED) {
2363     if ((fk = f.key) instanceof TreeBin) {
2364     TreeBin t = (TreeBin)fk;
2365     t.acquire(0);
2366     try {
2367     if (tabAt(tab, i) == f) {
2368     for (Node p = t.first; p != null; p = p.next) {
2369 dl 1.128 if (p.val != null) { // (currently always true)
2370     p.val = null;
2371     --delta;
2372     }
2373 dl 1.119 }
2374     t.first = null;
2375     t.root = null;
2376     ++i;
2377     }
2378     } finally {
2379     t.release(0);
2380     }
2381     }
2382     else
2383     tab = (Node[])fk;
2384     }
2385     else if ((fh & LOCKED) != 0) {
2386     counter.add(delta); // opportunistically update count
2387     delta = 0L;
2388     f.tryAwaitLock(tab, i);
2389     }
2390     else if (f.casHash(fh, fh | LOCKED)) {
2391     try {
2392     if (tabAt(tab, i) == f) {
2393     for (Node e = f; e != null; e = e.next) {
2394 dl 1.128 if (e.val != null) { // (currently always true)
2395     e.val = null;
2396     --delta;
2397     }
2398 dl 1.119 }
2399     setTabAt(tab, i, null);
2400     ++i;
2401     }
2402     } finally {
2403     if (!f.casHash(fh | LOCKED, fh)) {
2404     f.hash = fh;
2405     synchronized (f) { f.notifyAll(); };
2406     }
2407     }
2408     }
2409     }
2410     if (delta != 0)
2411     counter.add(delta);
2412     }
2413    
2414     /* ---------------- Table Traversal -------------- */
2415    
2416     /**
2417     * Encapsulates traversal for methods such as containsValue; also
2418 dl 1.126 * serves as a base class for other iterators and bulk tasks.
2419 dl 1.119 *
2420     * At each step, the iterator snapshots the key ("nextKey") and
2421     * value ("nextVal") of a valid node (i.e., one that, at point of
2422     * snapshot, has a non-null user value). Because val fields can
2423     * change (including to null, indicating deletion), field nextVal
2424     * might not be accurate at point of use, but still maintains the
2425     * weak consistency property of holding a value that was once
2426 dl 1.137 * valid. To support iterator.remove, the nextKey field is not
2427     * updated (nulled out) when the iterator cannot advance.
2428 dl 1.119 *
2429     * Internal traversals directly access these fields, as in:
2430     * {@code while (it.advance() != null) { process(it.nextKey); }}
2431     *
2432     * Exported iterators must track whether the iterator has advanced
2433     * (in hasNext vs next) (by setting/checking/nulling field
2434     * nextVal), and then extract key, value, or key-value pairs as
2435     * return values of next().
2436     *
2437     * The iterator visits once each still-valid node that was
2438     * reachable upon iterator construction. It might miss some that
2439     * were added to a bin after the bin was visited, which is OK wrt
2440     * consistency guarantees. Maintaining this property in the face
2441     * of possible ongoing resizes requires a fair amount of
2442     * bookkeeping state that is difficult to optimize away amidst
2443     * volatile accesses. Even so, traversal maintains reasonable
2444     * throughput.
2445     *
2446     * Normally, iteration proceeds bin-by-bin traversing lists.
2447     * However, if the table has been resized, then all future steps
2448     * must traverse both the bin at the current index as well as at
2449     * (index + baseSize); and so on for further resizings. To
2450     * paranoically cope with potential sharing by users of iterators
2451     * across threads, iteration terminates if a bounds check fails
2452     * for a table read.
2453     *
2454     * This class extends ForkJoinTask to streamline parallel
2455     * iteration in bulk operations (see BulkTask). This adds only an
2456     * int of space overhead, which is close enough to negligible that
2457 dl 1.126 * it is not worth worrying about where it is not needed. Because
2458     * ForkJoinTask is Serializable, but iterators need not be, we
2459     * need to add warning suppressions.
2460 dl 1.119 */
2461 dl 1.128 @SuppressWarnings("serial") static class Traverser<K,V,R> extends ForkJoinTask<R> {
2462 dl 1.119 final ConcurrentHashMap<K, V> map;
2463     Node next; // the next entry to use
2464     Object nextKey; // cached key field of next
2465     Object nextVal; // cached val field of next
2466     Node[] tab; // current table; updated if resized
2467     int index; // index of bin to use next
2468     int baseIndex; // current index of initial table
2469     int baseLimit; // index bound for initial table
2470 dl 1.130 int baseSize; // initial table size
2471 dl 1.119
2472     /** Creates iterator for all entries in the table. */
2473     Traverser(ConcurrentHashMap<K, V> map) {
2474 dl 1.130 this.map = map;
2475 dl 1.119 }
2476    
2477     /** Creates iterator for split() methods */
2478 dl 1.128 Traverser(Traverser<K,V,?> it) {
2479 dl 1.130 ConcurrentHashMap<K, V> m; Node[] t;
2480     if ((m = this.map = it.map) == null)
2481     t = null;
2482     else if ((t = it.tab) == null && // force parent tab initialization
2483     (t = it.tab = m.table) != null)
2484     it.baseLimit = it.baseSize = t.length;
2485     this.tab = t;
2486 dl 1.119 this.baseSize = it.baseSize;
2487 dl 1.128 it.baseLimit = this.index = this.baseIndex =
2488     ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1;
2489 dl 1.119 }
2490    
2491     /**
2492     * Advances next; returns nextVal or null if terminated.
2493     * See above for explanation.
2494     */
2495     final Object advance() {
2496 dl 1.137 Node e = next;
2497 dl 1.119 Object ev = null;
2498     outer: do {
2499     if (e != null) // advance past used/skipped node
2500     e = e.next;
2501     while (e == null) { // get to next non-null bin
2502 dl 1.130 ConcurrentHashMap<K, V> m;
2503 dl 1.119 Node[] t; int b, i, n; Object ek; // checks must use locals
2504 dl 1.130 if ((t = tab) != null)
2505     n = t.length;
2506     else if ((m = map) != null && (t = tab = m.table) != null)
2507     n = baseLimit = baseSize = t.length;
2508     else
2509 dl 1.119 break outer;
2510 dl 1.130 if ((b = baseIndex) >= baseLimit ||
2511     (i = index) < 0 || i >= n)
2512     break outer;
2513     if ((e = tabAt(t, i)) != null && e.hash == MOVED) {
2514 dl 1.119 if ((ek = e.key) instanceof TreeBin)
2515     e = ((TreeBin)ek).first;
2516     else {
2517     tab = (Node[])ek;
2518     continue; // restarts due to null val
2519     }
2520     } // visit upper slots if present
2521     index = (i += baseSize) < n ? i : (baseIndex = b + 1);
2522     }
2523     nextKey = e.key;
2524     } while ((ev = e.val) == null); // skip deleted or special nodes
2525     next = e;
2526     return nextVal = ev;
2527     }
2528    
2529     public final void remove() {
2530 dl 1.137 Object k = nextKey;
2531     if (k == null && (advance() == null || (k = nextKey) == null))
2532 dl 1.119 throw new IllegalStateException();
2533 dl 1.137 map.internalReplace(k, null, null);
2534 dl 1.119 }
2535    
2536     public final boolean hasNext() {
2537     return nextVal != null || advance() != null;
2538     }
2539    
2540     public final boolean hasMoreElements() { return hasNext(); }
2541     public final void setRawResult(Object x) { }
2542     public R getRawResult() { return null; }
2543     public boolean exec() { return true; }
2544     }
2545    
2546     /* ---------------- Public operations -------------- */
2547    
2548     /**
2549     * Creates a new, empty map with the default initial table size (16).
2550     */
2551     public ConcurrentHashMap() {
2552     this.counter = new LongAdder();
2553     }
2554    
2555     /**
2556     * Creates a new, empty map with an initial table size
2557     * accommodating the specified number of elements without the need
2558     * to dynamically resize.
2559     *
2560     * @param initialCapacity The implementation performs internal
2561     * sizing to accommodate this many elements.
2562     * @throws IllegalArgumentException if the initial capacity of
2563     * elements is negative
2564     */
2565     public ConcurrentHashMap(int initialCapacity) {
2566     if (initialCapacity < 0)
2567     throw new IllegalArgumentException();
2568     int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
2569     MAXIMUM_CAPACITY :
2570     tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
2571     this.counter = new LongAdder();
2572     this.sizeCtl = cap;
2573     }
2574    
2575     /**
2576     * Creates a new map with the same mappings as the given map.
2577     *
2578     * @param m the map
2579     */
2580     public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
2581     this.counter = new LongAdder();
2582     this.sizeCtl = DEFAULT_CAPACITY;
2583     internalPutAll(m);
2584     }
2585    
2586     /**
2587     * Creates a new, empty map with an initial table size based on
2588     * the given number of elements ({@code initialCapacity}) and
2589     * initial table density ({@code loadFactor}).
2590     *
2591     * @param initialCapacity the initial capacity. The implementation
2592     * performs internal sizing to accommodate this many elements,
2593     * given the specified load factor.
2594     * @param loadFactor the load factor (table density) for
2595     * establishing the initial table size
2596     * @throws IllegalArgumentException if the initial capacity of
2597     * elements is negative or the load factor is nonpositive
2598     *
2599     * @since 1.6
2600     */
2601     public ConcurrentHashMap(int initialCapacity, float loadFactor) {
2602     this(initialCapacity, loadFactor, 1);
2603     }
2604    
2605     /**
2606     * Creates a new, empty map with an initial table size based on
2607     * the given number of elements ({@code initialCapacity}), table
2608     * density ({@code loadFactor}), and number of concurrently
2609     * updating threads ({@code concurrencyLevel}).
2610     *
2611     * @param initialCapacity the initial capacity. The implementation
2612     * performs internal sizing to accommodate this many elements,
2613     * given the specified load factor.
2614     * @param loadFactor the load factor (table density) for
2615     * establishing the initial table size
2616     * @param concurrencyLevel the estimated number of concurrently
2617     * updating threads. The implementation may use this value as
2618     * a sizing hint.
2619     * @throws IllegalArgumentException if the initial capacity is
2620     * negative or the load factor or concurrencyLevel are
2621     * nonpositive
2622     */
2623     public ConcurrentHashMap(int initialCapacity,
2624     float loadFactor, int concurrencyLevel) {
2625     if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
2626     throw new IllegalArgumentException();
2627     if (initialCapacity < concurrencyLevel) // Use at least as many bins
2628     initialCapacity = concurrencyLevel; // as estimated threads
2629     long size = (long)(1.0 + (long)initialCapacity / loadFactor);
2630     int cap = (size >= (long)MAXIMUM_CAPACITY) ?
2631     MAXIMUM_CAPACITY : tableSizeFor((int)size);
2632     this.counter = new LongAdder();
2633     this.sizeCtl = cap;
2634     }
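A minimal construction sketch (class name and values are illustrative): as the javadoc above notes, loadFactor and concurrencyLevel only influence the initial table sizing in this implementation.

import java.util.concurrent.ConcurrentHashMap;

public class CtorDemo {
    public static void main(String[] args) {
        // Sized to hold roughly 64 elements; concurrencyLevel is only a hint.
        ConcurrentHashMap<String, Integer> scores =
            new ConcurrentHashMap<String, Integer>(64, 0.75f, 16);
        scores.put("alice", 42);
        System.out.println(scores.get("alice"));   // 42
    }
}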
2635    
2636     /**
2637 dl 1.137 * Creates a new {@link Set} backed by a ConcurrentHashMap
2638     * from the given type to {@code Boolean.TRUE}.
2639     *
2640     * @return the new set
2641     */
2642     public static <K> KeySetView<K,Boolean> newKeySet() {
2643     return new KeySetView<K,Boolean>(new ConcurrentHashMap<K,Boolean>(),
2644     Boolean.TRUE);
2645     }
2646    
2647     /**
2648     * Creates a new {@link Set} backed by a ConcurrentHashMap
2649     * from the given type to {@code Boolean.TRUE}.
2650     *
2651     * @param initialCapacity The implementation performs internal
2652     * sizing to accommodate this many elements.
2653     * @throws IllegalArgumentException if the initial capacity of
2654     * elements is negative
2655     * @return the new set
2656     */
2657     public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2658     return new KeySetView<K,Boolean>(new ConcurrentHashMap<K,Boolean>(initialCapacity),
2659     Boolean.TRUE);
2660     }
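A usage sketch of the set view created by newKeySet (element values invented; this assumes the jsr166 build containing this class is on the classpath, and relies on the view behaving as the Set the javadoc above describes).

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class KeySetDemo {
    public static void main(String[] args) {
        Set<String> seen = ConcurrentHashMap.newKeySet();  // backed by a map to Boolean.TRUE
        seen.add("alpha");
        seen.add("beta");
        System.out.println(seen.contains("alpha") + " " + seen.size()); // true 2
    }
}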
2661    
2662     /**
2663 dl 1.119 * {@inheritDoc}
2664     */
2665     public boolean isEmpty() {
2666     return counter.sum() <= 0L; // ignore transient negative values
2667     }
2668    
2669     /**
2670     * {@inheritDoc}
2671     */
2672     public int size() {
2673     long n = counter.sum();
2674     return ((n < 0L) ? 0 :
2675     (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
2676     (int)n);
2677     }
2678    
2679     /**
2680     * Returns the number of mappings. This method should be used
2681     * instead of {@link #size} because a ConcurrentHashMap may
2682     * contain more mappings than can be represented as an int. The
2683     * value returned is a snapshot; the actual count may differ if
2684 dl 1.127 * there are ongoing concurrent insertions or removals.
2685 dl 1.119 *
2686     * @return the number of mappings
2687     */
2688     public long mappingCount() {
2689     long n = counter.sum();
2690 dl 1.126 return (n < 0L) ? 0L : n; // ignore transient negative values
2691 dl 1.119 }
2692    
2693     /**
2694     * Returns the value to which the specified key is mapped,
2695     * or {@code null} if this map contains no mapping for the key.
2696     *
2697     * <p>More formally, if this map contains a mapping from a key
2698     * {@code k} to a value {@code v} such that {@code key.equals(k)},
2699     * then this method returns {@code v}; otherwise it returns
2700     * {@code null}. (There can be at most one such mapping.)
2701     *
2702     * @throws NullPointerException if the specified key is null
2703     */
2704 dl 1.128 @SuppressWarnings("unchecked") public V get(Object key) {
2705 dl 1.119 if (key == null)
2706     throw new NullPointerException();
2707     return (V)internalGet(key);
2708     }
2709    
2710     /**
2711 dl 1.129 * Returns the value to which the specified key is mapped,
2712 jsr166 1.133 * or the given defaultValue if this map contains no mapping for the key.
2713 dl 1.129 *
2714     * @param key the key
2715     * @param defaultValue the value to return if this map contains
2716 jsr166 1.134 * no mapping for the given key
2717 dl 1.129 * @return the mapping for the key, if present; else the defaultValue
2718     * @throws NullPointerException if the specified key is null
2719     */
2720     @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) {
2721     if (key == null)
2722     throw new NullPointerException();
2723     V v = (V) internalGet(key);
2724     return v == null ? defaultValue : v;
2725     }
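A minimal usage sketch of getValueOrDefault (names and values illustrative; note this method is specific to this jsr166 version of the class).

import java.util.concurrent.ConcurrentHashMap;

public class GetOrDefaultDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> counts = new ConcurrentHashMap<String, Integer>();
        counts.put("x", 3);
        System.out.println(counts.getValueOrDefault("x", 0)); // 3
        System.out.println(counts.getValueOrDefault("y", 0)); // 0 (no mapping)
    }
}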
2726    
2727     /**
2728 dl 1.119 * Tests if the specified object is a key in this table.
2729     *
2730     * @param key possible key
2731     * @return {@code true} if and only if the specified object
2732     * is a key in this table, as determined by the
2733     * {@code equals} method; {@code false} otherwise
2734     * @throws NullPointerException if the specified key is null
2735     */
2736     public boolean containsKey(Object key) {
2737     if (key == null)
2738     throw new NullPointerException();
2739     return internalGet(key) != null;
2740     }
2741    
2742     /**
2743     * Returns {@code true} if this map maps one or more keys to the
2744     * specified value. Note: This method may require a full traversal
2745     * of the map, and is much slower than method {@code containsKey}.
2746     *
2747     * @param value value whose presence in this map is to be tested
2748     * @return {@code true} if this map maps one or more keys to the
2749     * specified value
2750     * @throws NullPointerException if the specified value is null
2751     */
2752     public boolean containsValue(Object value) {
2753     if (value == null)
2754     throw new NullPointerException();
2755     Object v;
2756     Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2757     while ((v = it.advance()) != null) {
2758     if (v == value || value.equals(v))
2759     return true;
2760     }
2761     return false;
2762     }
2763    
2764     /**
2765     * Legacy method testing if some key maps into the specified value
2766     * in this table. This method is identical in functionality to
2767     * {@link #containsValue}, and exists solely to ensure
2768     * full compatibility with class {@link java.util.Hashtable},
2769     * which supported this method prior to introduction of the
2770     * Java Collections framework.
2771     *
2772     * @param value a value to search for
2773     * @return {@code true} if and only if some key maps to the
2774     * {@code value} argument in this table as
2775     * determined by the {@code equals} method;
2776     * {@code false} otherwise
2777     * @throws NullPointerException if the specified value is null
2778     */
2779     public boolean contains(Object value) {
2780     return containsValue(value);
2781     }
2782    
2783     /**
2784     * Maps the specified key to the specified value in this table.
2785     * Neither the key nor the value can be null.
2786     *
2787     * <p> The value can be retrieved by calling the {@code get} method
2788     * with a key that is equal to the original key.
2789     *
2790     * @param key key with which the specified value is to be associated
2791     * @param value value to be associated with the specified key
2792     * @return the previous value associated with {@code key}, or
2793     * {@code null} if there was no mapping for {@code key}
2794     * @throws NullPointerException if the specified key or value is null
2795     */
2796 dl 1.128 @SuppressWarnings("unchecked") public V put(K key, V value) {
2797 dl 1.119 if (key == null || value == null)
2798     throw new NullPointerException();
2799     return (V)internalPut(key, value);
2800     }
2801    
2802     /**
2803     * {@inheritDoc}
2804     *
2805     * @return the previous value associated with the specified key,
2806     * or {@code null} if there was no mapping for the key
2807     * @throws NullPointerException if the specified key or value is null
2808     */
2809 dl 1.128 @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) {
2810 dl 1.119 if (key == null || value == null)
2811     throw new NullPointerException();
2812     return (V)internalPutIfAbsent(key, value);
2813     }
2814    
2815     /**
2816     * Copies all of the mappings from the specified map to this one.
2817     * These mappings replace any mappings that this map had for any of the
2818     * keys currently in the specified map.
2819     *
2820     * @param m mappings to be stored in this map
2821     */
2822     public void putAll(Map<? extends K, ? extends V> m) {
2823     internalPutAll(m);
2824     }
2825    
2826     /**
2827     * If the specified key is not already associated with a value,
2828     * computes its value using the given mappingFunction and enters
2829     * it into the map unless null. This is equivalent to
2830     * <pre> {@code
2831     * if (map.containsKey(key))
2832     * return map.get(key);
2833     * value = mappingFunction.apply(key);
2834     * if (value != null)
2835     * map.put(key, value);
2836     * return value;}</pre>
2837     *
2838     * except that the action is performed atomically. If the
2839     * function returns {@code null} no mapping is recorded. If the
2840     * function itself throws an (unchecked) exception, the exception
2841     * is rethrown to its caller, and no mapping is recorded. Some
2842     * attempted update operations on this map by other threads may be
2843     * blocked while computation is in progress, so the computation
2844     * should be short and simple, and must not attempt to update any
2845     * other mappings of this Map. The most appropriate usage is to
2846     * construct a new object serving as an initial mapped value, or
2847     * memoized result, as in:
2848     *
2849     * <pre> {@code
2850     * map.computeIfAbsent(key, new Fun<K, V>() {
2851     * public V apply(K k) { return new Value(f(k)); }});}</pre>
2852     *
2853     * @param key key with which the specified value is to be associated
2854     * @param mappingFunction the function to compute a value
2855     * @return the current (existing or computed) value associated with
2856 jsr166 1.134 * the specified key, or null if the computed value is null
2857 dl 1.119 * @throws NullPointerException if the specified key or mappingFunction
2858     * is null
2859     * @throws IllegalStateException if the computation detectably
2860     * attempts a recursive update to this map that would
2861     * otherwise never complete
2862     * @throws RuntimeException or Error if the mappingFunction does so,
2863     * in which case the mapping is left unestablished
2864     */
2865 dl 1.128 @SuppressWarnings("unchecked") public V computeIfAbsent
2866     (K key, Fun<? super K, ? extends V> mappingFunction) {
2867 dl 1.119 if (key == null || mappingFunction == null)
2868     throw new NullPointerException();
2869     return (V)internalComputeIfAbsent(key, mappingFunction);
2870     }
2871    
2872     /**
2873     * If the given key is present, computes a new mapping value given a key and
2874     * its current mapped value. This is equivalent to
2875     * <pre> {@code
2876     * if (map.containsKey(key)) {
2877     * value = remappingFunction.apply(key, map.get(key));
2878     * if (value != null)
2879     * map.put(key, value);
2880     * else
2881     * map.remove(key);
2882     * }
2883     * }</pre>
2884     *
2885     * except that the action is performed atomically. If the
2886     * function returns {@code null}, the mapping is removed. If the
2887     * function itself throws an (unchecked) exception, the exception
2888     * is rethrown to its caller, and the current mapping is left
2889     * unchanged. Some attempted update operations on this map by
2890     * other threads may be blocked while computation is in progress,
2891     * so the computation should be short and simple, and must not
2892     * attempt to update any other mappings of this Map.
2894     *
2895     * @param key key with which the specified value is to be associated
2896     * @param remappingFunction the function to compute a value
2897 jsr166 1.123 * @return the new value associated with the specified key, or null if none
2898 dl 1.119 * @throws NullPointerException if the specified key or remappingFunction
2899     * is null
2900     * @throws IllegalStateException if the computation detectably
2901     * attempts a recursive update to this map that would
2902     * otherwise never complete
2903     * @throws RuntimeException or Error if the remappingFunction does so,
2904     * in which case the mapping is unchanged
2905     */
2906 dl 1.128 @SuppressWarnings("unchecked") public V computeIfPresent
2907     (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2908 dl 1.119 if (key == null || remappingFunction == null)
2909     throw new NullPointerException();
2910     return (V)internalCompute(key, true, remappingFunction);
2911     }
2912    
2913     /**
2914     * Computes a new mapping value given a key and
2915     * its current mapped value (or {@code null} if there is no current
2916     * mapping). This is equivalent to
2917     * <pre> {@code
2918     * value = remappingFunction.apply(key, map.get(key));
2919     * if (value != null)
2920     * map.put(key, value);
2921     * else
2922     * map.remove(key);
2923     * }</pre>
2924     *
2925     * except that the action is performed atomically. If the
2926     * function returns {@code null}, the mapping is removed. If the
2927     * function itself throws an (unchecked) exception, the exception
2928     * is rethrown to its caller, and the current mapping is left
2929     * unchanged. Some attempted update operations on this map by
2930     * other threads may be blocked while computation is in progress,
2931     * so the computation should be short and simple, and must not
2932     * attempt to update any other mappings of this Map. For example,
2933     * to either create or append new messages to a value mapping:
2934     *
2935     * <pre> {@code
2936     * Map<Key, String> map = ...;
2937     * final String msg = ...;
2938     * map.compute(key, new BiFun<Key, String, String>() {
2939     * public String apply(Key k, String v) {
2940     * return (v == null) ? msg : v + msg; }});}</pre>
2941     *
2942     * @param key key with which the specified value is to be associated
2943     * @param remappingFunction the function to compute a value
2944 jsr166 1.123 * @return the new value associated with the specified key, or null if none
2945 dl 1.119 * @throws NullPointerException if the specified key or remappingFunction
2946     * is null
2947     * @throws IllegalStateException if the computation detectably
2948     * attempts a recursive update to this map that would
2949     * otherwise never complete
2950     * @throws RuntimeException or Error if the remappingFunction does so,
2951     * in which case the mapping is unchanged
2952     */
2953 dl 1.128 @SuppressWarnings("unchecked") public V compute
2954     (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2955 dl 1.119 if (key == null || remappingFunction == null)
2956     throw new NullPointerException();
2957     return (V)internalCompute(key, false, remappingFunction);
2958     }
2959    
2960     /**
2961     * If the specified key is not already associated
2962     * with a value, associate it with the given value.
2963     * Otherwise, replace the value with the results of
2964     * the given remapping function. This is equivalent to:
2965     * <pre> {@code
2966     * if (!map.containsKey(key))
2967     * map.put(key, value);
2968     * else {
2969     * newValue = remappingFunction.apply(map.get(key), value);
2970     * if (newValue != null)
2971     * map.put(key, newValue);
2972     * else
2973     * map.remove(key);
2974     * }
2975     * }</pre>
2976     * except that the action is performed atomically. If the
2977     * function returns {@code null}, the mapping is removed. If the
2978     * function itself throws an (unchecked) exception, the exception
2979     * is rethrown to its caller, and the current mapping is left
2980     * unchanged. Some attempted update operations on this map by
2981     * other threads may be blocked while computation is in progress,
2982     * so the computation should be short and simple, and must not
2983     * attempt to update any other mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param value the value to use if absent
     * @param remappingFunction the function to recompute a value if present
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key, value, or
     * remappingFunction is null
2984     */
2985 dl 1.128 @SuppressWarnings("unchecked") public V merge
2986     (K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
2987 dl 1.119 if (key == null || value == null || remappingFunction == null)
2988     throw new NullPointerException();
2989     return (V)internalMerge(key, value, remappingFunction);
2990     }
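    /* A minimal usage sketch, assuming a ConcurrentHashMap<String,String>
     * named "map" and String variables "key" and "msg": atomically create
     * the mapping or append msg to the existing value.
     *
     *   map.merge(key, msg, new BiFun<String, String, String>() {
     *       public String apply(String oldValue, String givenValue) {
     *           return oldValue + givenValue;   // combine existing and supplied values
     *       }});
     */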
2991    
2992     /**
2993     * Removes the key (and its corresponding value) from this map.
2994     * This method does nothing if the key is not in the map.
2995     *
2996     * @param key the key that needs to be removed
2997     * @return the previous value associated with {@code key}, or
2998     * {@code null} if there was no mapping for {@code key}
2999     * @throws NullPointerException if the specified key is null
3000     */
3001 dl 1.128 @SuppressWarnings("unchecked") public V remove(Object key) {
3002 dl 1.119 if (key == null)
3003     throw new NullPointerException();
3004     return (V)internalReplace(key, null, null);
3005     }
3006    
3007     /**
3008     * {@inheritDoc}
3009     *
3010     * @throws NullPointerException if the specified key is null
3011     */
3012     public boolean remove(Object key, Object value) {
3013     if (key == null)
3014     throw new NullPointerException();
3015     if (value == null)
3016     return false;
3017     return internalReplace(key, null, value) != null;
3018 tim 1.1 }
3019 dl 1.31
3020     /**
3021 jsr166 1.68 * {@inheritDoc}
3022     *
3023     * @throws NullPointerException if any of the arguments are null
3024 dl 1.31 */
3025     public boolean replace(K key, V oldValue, V newValue) {
3026 dl 1.119 if (key == null || oldValue == null || newValue == null)
3027 dl 1.31 throw new NullPointerException();
3028 dl 1.119 return internalReplace(key, newValue, oldValue) != null;
3029 dl 1.32 }
3030    
3031     /**
3032 jsr166 1.68 * {@inheritDoc}
3033     *
3034     * @return the previous value associated with the specified key,
3035 dl 1.119 * or {@code null} if there was no mapping for the key
3036 jsr166 1.68 * @throws NullPointerException if the specified key or value is null
3037 dl 1.32 */
3038 dl 1.128 @SuppressWarnings("unchecked") public V replace(K key, V value) {
3039 dl 1.119 if (key == null || value == null)
3040 dl 1.32 throw new NullPointerException();
3041 dl 1.119 return (V)internalReplace(key, value, null);
3042 dl 1.31 }
3043    
3044 tim 1.1 /**
3045 jsr166 1.68 * Removes all of the mappings from this map.
3046 tim 1.1 */
3047     public void clear() {
3048 dl 1.119 internalClear();
3049 tim 1.1 }
3050    
3051     /**
3052 jsr166 1.68 * Returns a {@link Set} view of the keys contained in this map.
3053     * The set is backed by the map, so changes to the map are
3054 dl 1.137 * reflected in the set, and vice-versa.
3055     *
3056     * @return the set view
3057     */
3058     public KeySetView<K,V> keySet() {
3059     KeySetView<K,V> ks = keySet;
3060     return (ks != null) ? ks : (keySet = new KeySetView<K,V>(this, null));
3061     }
3062    
3063     /**
3064     * Returns a {@link Set} view of the keys in this map, using the
3065     * given common mapped value for any additions (i.e., {@link
3066     * Collection#add} and {@link Collection#addAll}). This is of
3067     * course only appropriate if it is acceptable to use the same
3068     * value for all additions from this view.
3069 jsr166 1.68 *
3070 dl 1.137 * @param mappedValue the mapped value to use for any
3071     * additions.
3072     * @return the set view
3073     * @throws NullPointerException if the mappedValue is null
3074 tim 1.1 */
3075 dl 1.137 public KeySetView<K,V> keySet(V mappedValue) {
3076     if (mappedValue == null)
3077     throw new NullPointerException();
3078     return new KeySetView<K,V>(this, mappedValue);
3079 tim 1.1 }
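    /* A minimal usage sketch, assuming a ConcurrentHashMap<String,Boolean>
     * named "map": obtain a Set view of the keys that supports add(), with
     * every added key mapped to Boolean.TRUE in the backing map.
     *
     *   Set<String> present = map.keySet(Boolean.TRUE);
     *   present.add("someKey");   // adds someKey, mapping it to Boolean.TRUE if absent
     */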
3080    
3081     /**
3082 jsr166 1.68 * Returns a {@link Collection} view of the values contained in this map.
3083     * The collection is backed by the map, so changes to the map are
3084     * reflected in the collection, and vice-versa. The collection
3085     * supports element removal, which removes the corresponding
3086 dl 1.119 * mapping from this map, via the {@code Iterator.remove},
3087     * {@code Collection.remove}, {@code removeAll},
3088     * {@code retainAll}, and {@code clear} operations. It does not
3089     * support the {@code add} or {@code addAll} operations.
3090 jsr166 1.68 *
3091 dl 1.119 * <p>The view's {@code iterator} is a "weakly consistent" iterator
3092 jsr166 1.68 * that will never throw {@link ConcurrentModificationException},
3093 dl 1.14 * and guarantees to traverse elements as they existed upon
3094     * construction of the iterator, and may (but is not guaranteed to)
3095     * reflect any modifications subsequent to construction.
3096 tim 1.1 */
3097 dl 1.119 public Collection<V> values() {
3098     Values<K,V> vs = values;
3099     return (vs != null) ? vs : (values = new Values<K,V>(this));
3100     }
3101    
3102     /**
3103     * Returns a {@link Set} view of the mappings contained in this map.
3104     * The set is backed by the map, so changes to the map are
3105     * reflected in the set, and vice-versa. The set supports element
3106     * removal, which removes the corresponding mapping from the map,
3107     * via the {@code Iterator.remove}, {@code Set.remove},
3108     * {@code removeAll}, {@code retainAll}, and {@code clear}
3109     * operations. It does not support the {@code add} or
3110     * {@code addAll} operations.
3111     *
3112     * <p>The view's {@code iterator} is a "weakly consistent" iterator
3113     * that will never throw {@link ConcurrentModificationException},
3114     * and guarantees to traverse elements as they existed upon
3115     * construction of the iterator, and may (but is not guaranteed to)
3116     * reflect any modifications subsequent to construction.
3117     */
3118     public Set<Map.Entry<K,V>> entrySet() {
3119     EntrySet<K,V> es = entrySet;
3120     return (es != null) ? es : (entrySet = new EntrySet<K,V>(this));
3121     }
3122    
3123     /**
3124     * Returns an enumeration of the keys in this table.
3125     *
3126     * @return an enumeration of the keys in this table
3127     * @see #keySet()
3128     */
3129     public Enumeration<K> keys() {
3130     return new KeyIterator<K,V>(this);
3131     }
3132    
3133     /**
3134     * Returns an enumeration of the values in this table.
3135     *
3136     * @return an enumeration of the values in this table
3137     * @see #values()
3138     */
3139     public Enumeration<V> elements() {
3140     return new ValueIterator<K,V>(this);
3141     }
3142    
3143     /**
3144 jsr166 1.122 * Returns a partitionable iterator of the keys in this map.
3145 dl 1.119 *
3146 jsr166 1.122 * @return a partitionable iterator of the keys in this map
3147 dl 1.119 */
3148     public Spliterator<K> keySpliterator() {
3149     return new KeyIterator<K,V>(this);
3150     }
3151    
3152     /**
3153 jsr166 1.122 * Returns a partitionable iterator of the values in this map.
3154 dl 1.119 *
3155 jsr166 1.122 * @return a partitionable iterator of the values in this map
3156 dl 1.119 */
3157     public Spliterator<V> valueSpliterator() {
3158     return new ValueIterator<K,V>(this);
3159     }
3160    
3161     /**
3162 jsr166 1.122 * Returns a partitionable iterator of the entries in this map.
3163 dl 1.119 *
3164 jsr166 1.122 * @return a partitionable iterator of the entries in this map
3165 dl 1.119 */
3166     public Spliterator<Map.Entry<K,V>> entrySpliterator() {
3167     return new EntryIterator<K,V>(this);
3168     }
3169    
3170     /**
3171     * Returns the hash code value for this {@link Map}, i.e.,
3172     * the sum of, for each key-value pair in the map,
3173     * {@code key.hashCode() ^ value.hashCode()}.
3174     *
3175     * @return the hash code value for this map
3176     */
3177     public int hashCode() {
3178     int h = 0;
3179     Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3180     Object v;
3181     while ((v = it.advance()) != null) {
3182     h += it.nextKey.hashCode() ^ v.hashCode();
3183     }
3184     return h;
3185     }
3186    
3187     /**
3188     * Returns a string representation of this map. The string
3189     * representation consists of a list of key-value mappings (in no
3190     * particular order) enclosed in braces ("{@code {}}"). Adjacent
3191     * mappings are separated by the characters {@code ", "} (comma
3192     * and space). Each key-value mapping is rendered as the key
3193     * followed by an equals sign ("{@code =}") followed by the
3194     * associated value.
3195     *
3196     * @return a string representation of this map
3197     */
3198     public String toString() {
3199     Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3200     StringBuilder sb = new StringBuilder();
3201     sb.append('{');
3202     Object v;
3203     if ((v = it.advance()) != null) {
3204     for (;;) {
3205     Object k = it.nextKey;
3206     sb.append(k == this ? "(this Map)" : k);
3207     sb.append('=');
3208     sb.append(v == this ? "(this Map)" : v);
3209     if ((v = it.advance()) == null)
3210     break;
3211     sb.append(',').append(' ');
3212     }
3213     }
3214     return sb.append('}').toString();
3215     }
3216    
3217     /**
3218     * Compares the specified object with this map for equality.
3219     * Returns {@code true} if the given object is a map with the same
3220     * mappings as this map. This operation may return misleading
3221     * results if either map is concurrently modified during execution
3222     * of this method.
3223     *
3224     * @param o object to be compared for equality with this map
3225     * @return {@code true} if the specified object is equal to this map
3226     */
3227     public boolean equals(Object o) {
3228     if (o != this) {
3229     if (!(o instanceof Map))
3230     return false;
3231     Map<?,?> m = (Map<?,?>) o;
3232     Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3233     Object val;
3234     while ((val = it.advance()) != null) {
3235     Object v = m.get(it.nextKey);
3236     if (v == null || (v != val && !v.equals(val)))
3237     return false;
3238     }
3239     for (Map.Entry<?,?> e : m.entrySet()) {
3240     Object mk, mv, v;
3241     if ((mk = e.getKey()) == null ||
3242     (mv = e.getValue()) == null ||
3243     (v = internalGet(mk)) == null ||
3244     (mv != v && !mv.equals(v)))
3245     return false;
3246     }
3247     }
3248     return true;
3249     }
3250    
3251     /* ----------------Iterators -------------- */
3252    
3253 dl 1.128 @SuppressWarnings("serial") static final class KeyIterator<K,V> extends Traverser<K,V,Object>
3254 dl 1.119 implements Spliterator<K>, Enumeration<K> {
3255     KeyIterator(ConcurrentHashMap<K, V> map) { super(map); }
3256 dl 1.128 KeyIterator(Traverser<K,V,Object> it) {
3257     super(it);
3258 dl 1.119 }
3259     public KeyIterator<K,V> split() {
3260 dl 1.137 if (nextKey != null)
3261 dl 1.119 throw new IllegalStateException();
3262 dl 1.128 return new KeyIterator<K,V>(this);
3263 dl 1.119 }
3264 dl 1.128 @SuppressWarnings("unchecked") public final K next() {
3265 dl 1.119 if (nextVal == null && advance() == null)
3266     throw new NoSuchElementException();
3267     Object k = nextKey;
3268     nextVal = null;
3269     return (K) k;
3270     }
3271    
3272     public final K nextElement() { return next(); }
3273     }
3274    
3275 dl 1.128 @SuppressWarnings("serial") static final class ValueIterator<K,V> extends Traverser<K,V,Object>
3276 dl 1.119 implements Spliterator<V>, Enumeration<V> {
3277     ValueIterator(ConcurrentHashMap<K, V> map) { super(map); }
3278 dl 1.128 ValueIterator(Traverser<K,V,Object> it) {
3279     super(it);
3280 dl 1.119 }
3281     public ValueIterator<K,V> split() {
3282 dl 1.137 if (nextKey != null)
3283 dl 1.119 throw new IllegalStateException();
3284 dl 1.128 return new ValueIterator<K,V>(this);
3285 dl 1.119 }
3286    
3287 dl 1.128 @SuppressWarnings("unchecked") public final V next() {
3288 dl 1.119 Object v;
3289     if ((v = nextVal) == null && (v = advance()) == null)
3290     throw new NoSuchElementException();
3291     nextVal = null;
3292     return (V) v;
3293     }
3294    
3295     public final V nextElement() { return next(); }
3296     }
3297    
3298 dl 1.128 @SuppressWarnings("serial") static final class EntryIterator<K,V> extends Traverser<K,V,Object>
3299 dl 1.119 implements Spliterator<Map.Entry<K,V>> {
3300     EntryIterator(ConcurrentHashMap<K, V> map) { super(map); }
3301 dl 1.128 EntryIterator(Traverser<K,V,Object> it) {
3302     super(it);
3303 dl 1.119 }
3304     public EntryIterator<K,V> split() {
3305 dl 1.137 if (nextKey != null)
3306 dl 1.119 throw new IllegalStateException();
3307 dl 1.128 return new EntryIterator<K,V>(this);
3308 dl 1.119 }
3309    
3310 dl 1.128 @SuppressWarnings("unchecked") public final Map.Entry<K,V> next() {
3311 dl 1.119 Object v;
3312     if ((v = nextVal) == null && (v = advance()) == null)
3313     throw new NoSuchElementException();
3314     Object k = nextKey;
3315     nextVal = null;
3316     return new MapEntry<K,V>((K)k, (V)v, map);
3317     }
3318     }
3319    
3320     /**
3321     * Exported Entry for iterators
3322     */
3323     static final class MapEntry<K,V> implements Map.Entry<K, V> {
3324     final K key; // non-null
3325     V val; // non-null
3326     final ConcurrentHashMap<K, V> map;
3327     MapEntry(K key, V val, ConcurrentHashMap<K, V> map) {
3328     this.key = key;
3329     this.val = val;
3330     this.map = map;
3331     }
3332     public final K getKey() { return key; }
3333     public final V getValue() { return val; }
3334     public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
3335     public final String toString(){ return key + "=" + val; }
3336    
3337     public final boolean equals(Object o) {
3338     Object k, v; Map.Entry<?,?> e;
3339     return ((o instanceof Map.Entry) &&
3340     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3341     (v = e.getValue()) != null &&
3342     (k == key || k.equals(key)) &&
3343     (v == val || v.equals(val)));
3344     }
3345    
3346     /**
3347     * Sets our entry's value and writes through to the map. The
3348     * value to return is somewhat arbitrary here. Since we do not
3349     * necessarily track asynchronous changes, the most recent
3350     * "previous" value could be different from what we return (or
3351     * could even have been removed, in which case the put will
3352     * re-establish it). We do not and cannot guarantee more.
3353     */
3354     public final V setValue(V value) {
3355     if (value == null) throw new NullPointerException();
3356     V v = val;
3357     val = value;
3358     map.put(key, value);
3359     return v;
3360     }
3361     }
3362    
3363     /* ----------------Views -------------- */
3364    
3365     /**
3366     * Base class for views.
3367     */
3368     static abstract class CHMView<K, V> {
3369     final ConcurrentHashMap<K, V> map;
3370     CHMView(ConcurrentHashMap<K, V> map) { this.map = map; }
3371     public final int size() { return map.size(); }
3372     public final boolean isEmpty() { return map.isEmpty(); }
3373     public final void clear() { map.clear(); }
3374    
3375     // implementations below rely on concrete classes supplying these
3376     abstract public Iterator<?> iterator();
3377     abstract public boolean contains(Object o);
3378     abstract public boolean remove(Object o);
3379    
3380     private static final String oomeMsg = "Required array size too large";
3381    
3382     public final Object[] toArray() {
3383     long sz = map.mappingCount();
3384     if (sz > (long)(MAX_ARRAY_SIZE))
3385     throw new OutOfMemoryError(oomeMsg);
3386     int n = (int)sz;
3387     Object[] r = new Object[n];
3388     int i = 0;
3389     Iterator<?> it = iterator();
3390     while (it.hasNext()) {
3391     if (i == n) {
3392     if (n >= MAX_ARRAY_SIZE)
3393     throw new OutOfMemoryError(oomeMsg);
3394     if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3395     n = MAX_ARRAY_SIZE;
3396     else
3397     n += (n >>> 1) + 1;
3398     r = Arrays.copyOf(r, n);
3399     }
3400     r[i++] = it.next();
3401     }
3402     return (i == n) ? r : Arrays.copyOf(r, i);
3403     }
3404    
3405 dl 1.128 @SuppressWarnings("unchecked") public final <T> T[] toArray(T[] a) {
3406 dl 1.119 long sz = map.mappingCount();
3407     if (sz > (long)(MAX_ARRAY_SIZE))
3408     throw new OutOfMemoryError(oomeMsg);
3409     int m = (int)sz;
3410     T[] r = (a.length >= m) ? a :
3411     (T[])java.lang.reflect.Array
3412     .newInstance(a.getClass().getComponentType(), m);
3413     int n = r.length;
3414     int i = 0;
3415     Iterator<?> it = iterator();
3416     while (it.hasNext()) {
3417     if (i == n) {
3418     if (n >= MAX_ARRAY_SIZE)
3419     throw new OutOfMemoryError(oomeMsg);
3420     if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3421     n = MAX_ARRAY_SIZE;
3422     else
3423     n += (n >>> 1) + 1;
3424     r = Arrays.copyOf(r, n);
3425     }
3426     r[i++] = (T)it.next();
3427     }
3428     if (a == r && i < n) {
3429     r[i] = null; // null-terminate
3430     return r;
3431     }
3432     return (i == n) ? r : Arrays.copyOf(r, i);
3433     }
3434    
3435     public final int hashCode() {
3436     int h = 0;
3437     for (Iterator<?> it = iterator(); it.hasNext();)
3438     h += it.next().hashCode();
3439     return h;
3440     }
3441    
3442     public final String toString() {
3443     StringBuilder sb = new StringBuilder();
3444     sb.append('[');
3445     Iterator<?> it = iterator();
3446     if (it.hasNext()) {
3447     for (;;) {
3448     Object e = it.next();
3449     sb.append(e == this ? "(this Collection)" : e);
3450     if (!it.hasNext())
3451     break;
3452     sb.append(',').append(' ');
3453     }
3454     }
3455     return sb.append(']').toString();
3456     }
3457    
3458     public final boolean containsAll(Collection<?> c) {
3459     if (c != this) {
3460     for (Iterator<?> it = c.iterator(); it.hasNext();) {
3461     Object e = it.next();
3462     if (e == null || !contains(e))
3463     return false;
3464     }
3465     }
3466     return true;
3467     }
3468    
3469     public final boolean removeAll(Collection<?> c) {
3470     boolean modified = false;
3471     for (Iterator<?> it = iterator(); it.hasNext();) {
3472     if (c.contains(it.next())) {
3473     it.remove();
3474     modified = true;
3475     }
3476     }
3477     return modified;
3478     }
3479    
3480     public final boolean retainAll(Collection<?> c) {
3481     boolean modified = false;
3482     for (Iterator<?> it = iterator(); it.hasNext();) {
3483     if (!c.contains(it.next())) {
3484     it.remove();
3485     modified = true;
3486     }
3487     }
3488     return modified;
3489     }
3490    
3491     }
3492    
3493     static final class Values<K,V> extends CHMView<K,V>
3494     implements Collection<V> {
3495     Values(ConcurrentHashMap<K, V> map) { super(map); }
3496     public final boolean contains(Object o) { return map.containsValue(o); }
3497     public final boolean remove(Object o) {
3498     if (o != null) {
3499     Iterator<V> it = new ValueIterator<K,V>(map);
3500     while (it.hasNext()) {
3501     if (o.equals(it.next())) {
3502     it.remove();
3503     return true;
3504     }
3505     }
3506     }
3507     return false;
3508     }
3509     public final Iterator<V> iterator() {
3510     return new ValueIterator<K,V>(map);
3511     }
3512     public final boolean add(V e) {
3513     throw new UnsupportedOperationException();
3514     }
3515     public final boolean addAll(Collection<? extends V> c) {
3516     throw new UnsupportedOperationException();
3517     }
3518    
3519     }
3520    
3521     static final class EntrySet<K,V> extends CHMView<K,V>
3522     implements Set<Map.Entry<K,V>> {
3523     EntrySet(ConcurrentHashMap<K, V> map) { super(map); }
3524     public final boolean contains(Object o) {
3525     Object k, v, r; Map.Entry<?,?> e;
3526     return ((o instanceof Map.Entry) &&
3527     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3528     (r = map.get(k)) != null &&
3529     (v = e.getValue()) != null &&
3530     (v == r || v.equals(r)));
3531     }
3532     public final boolean remove(Object o) {
3533     Object k, v; Map.Entry<?,?> e;
3534     return ((o instanceof Map.Entry) &&
3535     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3536     (v = e.getValue()) != null &&
3537     map.remove(k, v));
3538     }
3539     public final Iterator<Map.Entry<K,V>> iterator() {
3540     return new EntryIterator<K,V>(map);
3541     }
3542     public final boolean add(Entry<K,V> e) {
3543     throw new UnsupportedOperationException();
3544     }
3545     public final boolean addAll(Collection<? extends Entry<K,V>> c) {
3546     throw new UnsupportedOperationException();
3547     }
3548     public boolean equals(Object o) {
3549     Set<?> c;
3550     return ((o instanceof Set) &&
3551     ((c = (Set<?>)o) == this ||
3552     (containsAll(c) && c.containsAll(this))));
3553     }
3554     }
3555    
3556     /* ---------------- Serialization Support -------------- */
3557    
3558     /**
3559     * Stripped-down version of helper class used in previous version,
3560     * declared for the sake of serialization compatibility
3561     */
3562     static class Segment<K,V> implements Serializable {
3563     private static final long serialVersionUID = 2249069246763182397L;
3564     final float loadFactor;
3565     Segment(float lf) { this.loadFactor = lf; }
3566     }
3567    
3568     /**
3569     * Saves the state of the {@code ConcurrentHashMap} instance to a
3570     * stream (i.e., serializes it).
3571     * @param s the stream
3572     * @serialData
3573     * the key (Object) and value (Object)
3574     * for each key-value mapping, followed by a null pair.
3575     * The key-value mappings are emitted in no particular order.
3576     */
3577 dl 1.128 @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s)
3578 dl 1.119 throws java.io.IOException {
3579     if (segments == null) { // for serialization compatibility
3580     segments = (Segment<K,V>[])
3581     new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
3582     for (int i = 0; i < segments.length; ++i)
3583     segments[i] = new Segment<K,V>(LOAD_FACTOR);
3584     }
3585     s.defaultWriteObject();
3586     Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3587     Object v;
3588     while ((v = it.advance()) != null) {
3589     s.writeObject(it.nextKey);
3590     s.writeObject(v);
3591     }
3592     s.writeObject(null);
3593     s.writeObject(null);
3594     segments = null; // throw away
3595     }
3596    
3597     /**
3598     * Reconstitutes the instance from a stream (that is, deserializes it).
3599     * @param s the stream
3600     */
3601 dl 1.128 @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s)
3602 dl 1.119 throws java.io.IOException, ClassNotFoundException {
3603     s.defaultReadObject();
3604     this.segments = null; // unneeded
3605     // initialize transient final field
3606     UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder());
3607    
3608     // Create all nodes, then place in table once size is known
3609     long size = 0L;
3610     Node p = null;
3611     for (;;) {
3612     K k = (K) s.readObject();
3613     V v = (V) s.readObject();
3614     if (k != null && v != null) {
3615     int h = spread(k.hashCode());
3616     p = new Node(h, k, v, p);
3617     ++size;
3618     }
3619     else
3620     break;
3621     }
3622     if (p != null) {
3623     boolean init = false;
3624     int n;
3625     if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
3626     n = MAXIMUM_CAPACITY;
3627     else {
3628     int sz = (int)size;
3629     n = tableSizeFor(sz + (sz >>> 1) + 1);
3630     }
3631     int sc = sizeCtl;
3632     boolean collide = false;
3633     if (n > sc &&
3634     UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
3635     try {
3636     if (table == null) {
3637     init = true;
3638     Node[] tab = new Node[n];
3639     int mask = n - 1;
3640     while (p != null) {
3641     int j = p.hash & mask;
3642     Node next = p.next;
3643     Node q = p.next = tabAt(tab, j);
3644     setTabAt(tab, j, p);
3645     if (!collide && q != null && q.hash == p.hash)
3646     collide = true;
3647     p = next;
3648     }
3649     table = tab;
3650     counter.add(size);
3651     sc = n - (n >>> 2);
3652     }
3653     } finally {
3654     sizeCtl = sc;
3655     }
3656     if (collide) { // rescan and convert to TreeBins
3657     Node[] tab = table;
3658     for (int i = 0; i < tab.length; ++i) {
3659     int c = 0;
3660     for (Node e = tabAt(tab, i); e != null; e = e.next) {
3661     if (++c > TREE_THRESHOLD &&
3662     (e.key instanceof Comparable)) {
3663     replaceWithTreeBin(tab, i, e.key);
3664     break;
3665     }
3666     }
3667     }
3668     }
3669     }
3670     if (!init) { // Can only happen if unsafely published.
3671     while (p != null) {
3672     internalPut(p.key, p.val);
3673     p = p.next;
3674     }
3675     }
3676     }
3677     }
3678    
3679    
3680     // -------------------------------------------------------
3681    
3682     // SAMs (single-abstract-method interfaces)
3683     /** Interface describing a void action of one argument */
3684     public interface Action<A> { void apply(A a); }
3685     /** Interface describing a void action of two arguments */
3686     public interface BiAction<A,B> { void apply(A a, B b); }
3687     /** Interface describing a function of one argument */
3688     public interface Fun<A,T> { T apply(A a); }
3689     /** Interface describing a function of two arguments */
3690     public interface BiFun<A,B,T> { T apply(A a, B b); }
3691     /** Interface describing a function of no arguments */
3692     public interface Generator<T> { T apply(); }
3693     /** Interface describing a function mapping its argument to a double */
3694     public interface ObjectToDouble<A> { double apply(A a); }
3695     /** Interface describing a function mapping its argument to a long */
3696     public interface ObjectToLong<A> { long apply(A a); }
3697     /** Interface describing a function mapping its argument to an int */
3698     public interface ObjectToInt<A> {int apply(A a); }
3699     /** Interface describing a function mapping two arguments to a double */
3700     public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
3701     /** Interface describing a function mapping two arguments to a long */
3702     public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
3703     /** Interface describing a function mapping two arguments to an int */
3704     public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); }
3705     /** Interface describing a function mapping a double to a double */
3706     public interface DoubleToDouble { double apply(double a); }
3707     /** Interface describing a function mapping a long to a long */
3708     public interface LongToLong { long apply(long a); }
3709     /** Interface describing a function mapping an int to an int */
3710     public interface IntToInt { int apply(int a); }
3711     /** Interface describing a function mapping two doubles to a double */
3712     public interface DoubleByDoubleToDouble { double apply(double a, double b); }
3713     /** Interface describing a function mapping two longs to a long */
3714     public interface LongByLongToLong { long apply(long a, long b); }
3715     /** Interface describing a function mapping two ints to an int */
3716     public interface IntByIntToInt { int apply(int a, int b); }
3717    
3718    
3719     // -------------------------------------------------------
3720    
3721     /**
3722 dl 1.137 * Performs the given action for each (key, value).
3723 dl 1.119 *
3724 dl 1.137 * @param action the action
3725 dl 1.119 */
3726 dl 1.137 public void forEach(BiAction<K,V> action) {
3727     ForkJoinTasks.forEach
3728     (this, action).invoke();
3729 dl 1.119 }
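    /* A minimal usage sketch, assuming a ConcurrentHashMap<String,Long>
     * named "map": print every key/value pair; the pairs may be visited in
     * parallel and in no particular order.
     *
     *   map.forEach(new BiAction<String, Long>() {
     *       public void apply(String k, Long v) {
     *           System.out.println(k + " -> " + v);
     *       }});
     */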
3730    
3731     /**
3732 dl 1.137 * Performs the given action for each non-null transformation
3733     * of each (key, value).
3734     *
3735     * @param transformer a function returning the transformation
3736     * for an element, or null if there is no transformation (in
3737     * which case the action is not applied).
3738     * @param action the action
3739 dl 1.119 */
3740 dl 1.137 public <U> void forEach(BiFun<? super K, ? super V, ? extends U> transformer,
3741     Action<U> action) {
3742     ForkJoinTasks.forEach
3743     (this, transformer, action).invoke();
3744     }
3745    
3746     /**
3747     * Returns a non-null result from applying the given search
3748     * function on each (key, value), or null if none. Upon
3749     * success, further element processing is suppressed and the
3750     * results of any other parallel invocations of the search
3751     * function are ignored.
3752     *
3753     * @param searchFunction a function returning a non-null
3754     * result on success, else null
3755     * @return a non-null result from applying the given search
3756     * function on each (key, value), or null if none
3757     */
3758     public <U> U search(BiFun<? super K, ? super V, ? extends U> searchFunction) {
3759     return ForkJoinTasks.search
3760     (this, searchFunction).invoke();
3761     }
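    /* A minimal usage sketch, assuming a ConcurrentHashMap<String,Long>
     * named "map": find any key whose value exceeds 1000; returning null
     * from the search function means "keep looking", and the first non-null
     * result ends the search.
     *
     *   String hit = map.search(new BiFun<String, Long, String>() {
     *       public String apply(String k, Long v) {
     *           return (v.longValue() > 1000L) ? k : null;
     *       }});
     */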
3762    
3763     /**
3764     * Returns the result of accumulating the given transformation
3765     * of all (key, value) pairs using the given reducer to
3766     * combine values, or null if none.
3767     *
3768     * @param transformer a function returning the transformation
3769     * for an element, or null if there is no transformation (in
3770     * which case it is not combined).
3771     * @param reducer a commutative associative combining function
3772     * @return the result of accumulating the given transformation
3773     * of all (key, value) pairs
3774     */
3775     public <U> U reduce(BiFun<? super K, ? super V, ? extends U> transformer,
3776     BiFun<? super U, ? super U, ? extends U> reducer) {
3777     return ForkJoinTasks.reduce
3778     (this, transformer, reducer).invoke();
3779     }
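    /* A minimal usage sketch, assuming a ConcurrentHashMap<String,Long>
     * named "map": map each pair to the length of its key and sum the
     * results; the result is null only if the map is empty.
     *
     *   Integer totalKeyLength = map.reduce(
     *       new BiFun<String, Long, Integer>() {
     *           public Integer apply(String k, Long v) { return k.length(); }},
     *       new BiFun<Integer, Integer, Integer>() {
     *           public Integer apply(Integer a, Integer b) { return a + b; }});
     */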
3780    
3781     /**
3782     * Returns the result of accumulating the given transformation
3783     * of all (key, value) pairs using the given reducer to
3784     * combine values, and the given basis as an identity value.
3785     *
3786     * @param transformer a function returning the transformation
3787     * for an element
3788     * @param basis the identity (initial default value) for the reduction
3789     * @param reducer a commutative associative combining function
3790     * @return the result of accumulating the given transformation
3791     * of all (key, value) pairs
3792     */
3793     public double reduceToDouble(ObjectByObjectToDouble<? super K, ? super V> transformer,
3794     double basis,
3795     DoubleByDoubleToDouble reducer) {
3796     return ForkJoinTasks.reduceToDouble
3797     (this, transformer, basis, reducer).invoke();
3798     }
3799 dl 1.119
3800 dl 1.137 /**
3801     * Returns the result of accumulating the given transformation
3802     * of all (key, value) pairs using the given reducer to
3803     * combine values, and the given basis as an identity value.
3804     *
3805     * @param transformer a function returning the transformation
3806     * for an element
3807     * @param basis the identity (initial default value) for the reduction
3808     * @param reducer a commutative associative combining function
3809     * @return the result of accumulating the given transformation
3810     * of all (key, value) pairs
3811     */
3812     public long reduceToLong(ObjectByObjectToLong<? super K, ? super V> transformer,
3813     long basis,
3814     LongByLongToLong reducer) {
3815     return ForkJoinTasks.reduceToLong
3816     (this, transformer, basis, reducer).invoke();
3817     }
3818    
3819     /**
3820     * Returns the result of accumulating the given transformation
3821     * of all (key, value) pairs using the given reducer to
3822     * combine values, and the given basis as an identity value.
3823     *
3824     * @param transformer a function returning the transformation
3825     * for an element
3826     * @param basis the identity (initial default value) for the reduction
3827     * @param reducer a commutative associative combining function
3828     * @return the result of accumulating the given transformation
3829     * of all (key, value) pairs
3830     */
3831     public int reduceToInt(ObjectByObjectToInt<? super K, ? super V> transformer,
3832     int basis,
3833     IntByIntToInt reducer) {
3834     return ForkJoinTasks.reduceToInt
3835     (this, transformer, basis, reducer).invoke();
3836     }
3837    
3838     /**
3839     * Performs the given action for each key.
3840     *
3841     * @param action the action
3842     */
3843     public void forEachKey(Action<K> action) {
3844     ForkJoinTasks.forEachKey
3845     (this, action).invoke();
3846     }
3847 dl 1.119
3848 dl 1.137 /**
3849     * Performs the given action for each non-null transformation
3850     * of each key.
3851     *
3852     * @param transformer a function returning the transformation
3853     * for an element, or null if there is no transformation (in
3854     * which case the action is not applied).
3855     * @param action the action
3856     */
3857     public <U> void forEachKey(Fun<? super K, ? extends U> transformer,
3858     Action<U> action) {
3859     ForkJoinTasks.forEachKey
3860     (this, transformer, action).invoke();
3861     }
3862 dl 1.119
3863 dl 1.137 /**
3864     * Returns a non-null result from applying the given search
3865     * function on each key, or null if none. Upon success,
3866     * further element processing is suppressed and the results of
3867     * any other parallel invocations of the search function are
3868     * ignored.
3869     *
3870     * @param searchFunction a function returning a non-null
3871     * result on success, else null
3872     * @return a non-null result from applying the given search
3873     * function on each key, or null if none
3874     */
3875     public <U> U searchKeys(Fun<? super K, ? extends U> searchFunction) {
3876     return ForkJoinTasks.searchKeys
3877     (this, searchFunction).invoke();
3878     }
3879 dl 1.119
3880 dl 1.137 /**
3881     * Returns the result of accumulating all keys using the given
3882     * reducer to combine values, or null if none.
3883     *
3884     * @param reducer a commutative associative combining function
3885     * @return the result of accumulating all keys using the given
3886     * reducer to combine values, or null if none
3887     */
3888     public K reduceKeys(BiFun<? super K, ? super K, ? extends K> reducer) {
3889     return ForkJoinTasks.reduceKeys
3890     (this, reducer).invoke();
3891     }
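    /* A minimal usage sketch, assuming a ConcurrentHashMap<String,Long>
     * named "map": reduce the keys to the lexicographically smallest one;
     * taking the minimum is commutative and associative, as required.
     *
     *   String smallest = map.reduceKeys(new BiFun<String, String, String>() {
     *       public String apply(String a, String b) {
     *           return (a.compareTo(b) <= 0) ? a : b;
     *       }});
     */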
3892 dl 1.119
3893 dl 1.137 /**
3894     * Returns the result of accumulating the given transformation
3895     * of all keys using the given reducer to combine values, or
3896     * null if none.
3897     *
3898     * @param transformer a function returning the transformation
3899     * for an element, or null if there is no transformation (in
3900     * which case it is not combined).
3901     * @param reducer a commutative associative combining function
3902     * @return the result of accumulating the given transformation
3903     * of all keys
3904     */
3905     public <U> U reduceKeys(Fun<? super K, ? extends U> transformer,
3906 dl 1.119 BiFun<? super U, ? super U, ? extends U> reducer) {
3907 dl 1.137 return ForkJoinTasks.reduceKeys
3908     (this, transformer, reducer).invoke();
3909     }
3910 dl 1.119
3911 dl 1.137 /**
3912     * Returns the result of accumulating the given transformation
3913     * of all keys using the given reducer to combine values, and
3914     * the given basis as an identity value.
3915     *
3916     * @param transformer a function returning the transformation
3917     * for an element
3918     * @param basis the identity (initial default value) for the reduction
3919     * @param reducer a commutative associative combining function
3920     * @return the result of accumulating the given transformation
3921     * of all keys
3922     */
3923     public double reduceKeysToDouble(ObjectToDouble<? super K> transformer,
3924 dl 1.119 double basis,
3925     DoubleByDoubleToDouble reducer) {
3926 dl 1.137 return ForkJoinTasks.reduceKeysToDouble
3927     (this, transformer, basis, reducer).invoke();
3928     }
3929 dl 1.119
3930 dl 1.137 /**
3931     * Returns the result of accumulating the given transformation
3932     * of all keys using the given reducer to combine values, and
3933     * the given basis as an identity value.
3934     *
3935     * @param transformer a function returning the transformation
3936     * for an element
3937     * @param basis the identity (initial default value) for the reduction
3938     * @param reducer a commutative associative combining function
3939     * @return the result of accumulating the given transformation
3940     * of all keys
3941     */
3942     public long reduceKeysToLong(ObjectToLong<? super K> transformer,
3943 dl 1.119 long basis,
3944     LongByLongToLong reducer) {
3945 dl 1.137 return ForkJoinTasks.reduceKeysToLong
3946     (this, transformer, basis, reducer).invoke();
3947     }
3948 dl 1.119
3949 dl 1.137 /**
3950     * Returns the result of accumulating the given transformation
3951     * of all keys using the given reducer to combine values, and
3952     * the given basis as an identity value.
3953     *
3954     * @param transformer a function returning the transformation
3955     * for an element
3956     * @param basis the identity (initial default value) for the reduction
3957     * @param reducer a commutative associative combining function
3958     * @return the result of accumulating the given transformation
3959     * of all keys
3960     */
3961     public int reduceKeysToInt(ObjectToInt<? super K> transformer,
3962 dl 1.119 int basis,
3963     IntByIntToInt reducer) {
3964 dl 1.137 return ForkJoinTasks.reduceKeysToInt
3965     (this, transformer, basis, reducer).invoke();
3966     }
3967 dl 1.119
3968 dl 1.137 /**
3969     * Performs the given action for each value.
3970     *
3971     * @param action the action
3972     */
3973     public void forEachValue(Action<V> action) {
3974     ForkJoinTasks.forEachValue
3975     (this, action).invoke();
3976     }
3977 dl 1.119
3978 dl 1.137 /**
3979     * Performs the given action for each non-null transformation
3980     * of each value.
3981     *
3982     * @param transformer a function returning the transformation
3983     * for an element, or null if there is no transformation (in
3984     * which case the action is not applied).
     * @param action the action
3985     */
3986     public <U> void forEachValue(Fun<? super V, ? extends U> transformer,
3987     Action<U> action) {
3988     ForkJoinTasks.forEachValue
3989     (this, transformer, action).invoke();
3990     }
3991 dl 1.119
3992 dl 1.137 /**
3993     * Returns a non-null result from applying the given search
3994     * function on each value, or null if none. Upon success,
3995     * further element processing is suppressed and the results of
3996     * any other parallel invocations of the search function are
3997     * ignored.
3998     *
3999     * @param searchFunction a function returning a non-null
4000     * result on success, else null
4001     * @return a non-null result from applying the given search
4002     * function on each value, or null if none
4003     *
4004     */
4005     public <U> U searchValues(Fun<? super V, ? extends U> searchFunction) {
4006     return ForkJoinTasks.searchValues
4007     (this, searchFunction).invoke();
4008     }
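    /* A minimal usage sketch, assuming a ConcurrentHashMap<String,String>
     * named "map": return any value containing the substring "error", or
     * null if no value matches.
     *
     *   String found = map.searchValues(new Fun<String, String>() {
     *       public String apply(String v) {
     *           return v.contains("error") ? v : null;
     *       }});
     */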
4009 dl 1.119
4010 dl 1.137 /**
4011     * Returns the result of accumulating all values using the
4012     * given reducer to combine values, or null if none.
4013     *
4014     * @param reducer a commutative associative combining function
4015     * @return the result of accumulating all values
4016     */
4017     public V reduceValues(BiFun<? super V, ? super V, ? extends V> reducer) {
4018     return ForkJoinTasks.reduceValues
4019     (this, reducer).invoke();
4020     }
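    /* A minimal usage sketch, assuming a ConcurrentHashMap<String,Long>
     * named "map": reduce the values to their maximum, or null if the map
     * is empty.
     *
     *   Long max = map.reduceValues(new BiFun<Long, Long, Long>() {
     *       public Long apply(Long a, Long b) {
     *           return (a.longValue() >= b.longValue()) ? a : b;
     *       }});
     */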
4021 dl 1.119
4022 dl 1.137 /**
4023     * Returns the result of accumulating the given transformation
4024     * of all values using the given reducer to combine values, or
4025     * null if none.
4026     *
4027     * @param transformer a function returning the transformation
4028     * for an element, or null if there is no transformation (in
4029     * which case it is not combined).
4030     * @param reducer a commutative associative combining function
4031     * @return the result of accumulating the given transformation
4032     * of all values
4033     */
4034     public <U> U reduceValues(Fun<? super V, ? extends U> transformer,
4035     BiFun<? super U, ? super U, ? extends U> reducer) {
4036     return ForkJoinTasks.reduceValues
4037     (this, transformer, reducer).invoke();
4038     }
4039 dl 1.119
4040 dl 1.137 /**
4041     * Returns the result of accumulating the given transformation
4042     * of all values using the given reducer to combine values,
4043     * and the given basis as an identity value.
4044     *
4045     * @param transformer a function returning the transformation
4046     * for an element
4047     * @param basis the identity (initial default value) for the reduction
4048     * @param reducer a commutative associative combining function
4049     * @return the result of accumulating the given transformation
4050     * of all values
4051     */
4052     public double reduceValuesToDouble(ObjectToDouble<? super V> transformer,
4053     double basis,
4054     DoubleByDoubleToDouble reducer) {
4055     return ForkJoinTasks.reduceValuesToDouble
4056     (this, transformer, basis, reducer).invoke();
4057     }
4058 dl 1.119
4059 dl 1.137 /**
4060     * Returns the result of accumulating the given transformation
4061     * of all values using the given reducer to combine values,
4062     * and the given basis as an identity value.
4063     *
4064     * @param transformer a function returning the transformation
4065     * for an element
4066     * @param basis the identity (initial default value) for the reduction
4067     * @param reducer a commutative associative combining function
4068     * @return the result of accumulating the given transformation
4069     * of all values
4070     */
4071     public long reduceValuesToLong(ObjectToLong<? super V> transformer,
4072     long basis,
4073     LongByLongToLong reducer) {
4074     return ForkJoinTasks.reduceValuesToLong
4075     (this, transformer, basis, reducer).invoke();
4076     }
4077 dl 1.119
4078 dl 1.137 /**
4079     * Returns the result of accumulating the given transformation
4080     * of all values using the given reducer to combine values,
4081     * and the given basis as an identity value.
4082     *
4083     * @param transformer a function returning the transformation
4084     * for an element
4085     * @param basis the identity (initial default value) for the reduction
4086     * @param reducer a commutative associative combining function
4087     * @return the result of accumulating the given transformation
4088     * of all values
4089     */
4090     public int reduceValuesToInt(ObjectToInt<? super V> transformer,
4091     int basis,
4092     IntByIntToInt reducer) {
4093     return ForkJoinTasks.reduceValuesToInt
4094     (this, transformer, basis, reducer).invoke();
4095     }
4096 dl 1.119
4097 dl 1.137 /**
4098     * Performs the given action for each entry.
4099     *
4100     * @param action the action
4101     */
4102     public void forEachEntry(Action<Map.Entry<K,V>> action) {
4103     ForkJoinTasks.forEachEntry
4104     (this, action).invoke();
4105     }
4106 dl 1.119
4107 dl 1.137 /**
4108     * Performs the given action for each non-null transformation
4109     * of each entry.
4110     *
4111     * @param transformer a function returning the transformation
4112     * for an element, or null if there is no transformation (in
4113     * which case the action is not applied).
4114     * @param action the action
4115     */
4116     public <U> void forEachEntry(Fun<Map.Entry<K,V>, ? extends U> transformer,
4117     Action<U> action) {
4118     ForkJoinTasks.forEachEntry
4119     (this, transformer, action).invoke();
4120     }
4121 dl 1.119
4122 dl 1.137 /**
4123     * Returns a non-null result from applying the given search
4124     * function on each entry, or null if none. Upon success,
4125     * further element processing is suppressed and the results of
4126     * any other parallel invocations of the search function are
4127     * ignored.
4128     *
4129     * @param searchFunction a function returning a non-null
4130     * result on success, else null
4131     * @return a non-null result from applying the given search
4132     * function on each entry, or null if none
4133     */
4134     public <U> U searchEntries(Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
4135     return ForkJoinTasks.searchEntries
4136     (this, searchFunction).invoke();
4137     }
4138 dl 1.119
4139 dl 1.137 /**
4140     * Returns the result of accumulating all entries using the
4141     * given reducer to combine values, or null if none.
4142     *
4143     * @param reducer a commutative associative combining function
4144     * @return the result of accumulating all entries
4145     */
4146     public Map.Entry<K,V> reduceEntries(BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4147     return ForkJoinTasks.reduceEntries
4148     (this, reducer).invoke();
4149     }
4150 dl 1.119
4151 dl 1.137 /**
4152     * Returns the result of accumulating the given transformation
4153     * of all entries using the given reducer to combine values,
4154     * or null if none.
4155     *
4156     * @param transformer a function returning the transformation
4157     * for an element, or null if there is no transformation (in
4158     * which case it is not combined).
4159     * @param reducer a commutative associative combining function
4160     * @return the result of accumulating the given transformation
4161     * of all entries
4162     */
4163     public <U> U reduceEntries(Fun<Map.Entry<K,V>, ? extends U> transformer,
4164     BiFun<? super U, ? super U, ? extends U> reducer) {
4165     return ForkJoinTasks.reduceEntries
4166     (this, transformer, reducer).invoke();
4167     }
4168 dl 1.119
4169 dl 1.137 /**
4170     * Returns the result of accumulating the given transformation
4171     * of all entries using the given reducer to combine values,
4172     * and the given basis as an identity value.
4173     *
4174     * @param transformer a function returning the transformation
4175     * for an element
4176     * @param basis the identity (initial default value) for the reduction
4177     * @param reducer a commutative associative combining function
4178     * @return the result of accumulating the given transformation
4179     * of all entries
4180     */
4181     public double reduceEntriesToDouble(ObjectToDouble<Map.Entry<K,V>> transformer,
4182     double basis,
4183     DoubleByDoubleToDouble reducer) {
4184     return ForkJoinTasks.reduceEntriesToDouble
4185     (this, transformer, basis, reducer).invoke();
4186     }
4187 dl 1.119
4188 dl 1.137 /**
4189     * Returns the result of accumulating the given transformation
4190     * of all entries using the given reducer to combine values,
4191     * and the given basis as an identity value.
4192     *
4193     * @param transformer a function returning the transformation
4194     * for an element
4195     * @param basis the identity (initial default value) for the reduction
4196     * @param reducer a commutative associative combining function
4197     * @return the result of accumulating the given transformation
4198     * of all entries
4199     */
4200     public long reduceEntriesToLong(ObjectToLong<Map.Entry<K,V>> transformer,
4201     long basis,
4202     LongByLongToLong reducer) {
4203     return ForkJoinTasks.reduceEntriesToLong
4204     (this, transformer, basis, reducer).invoke();
4205     }
4206 dl 1.119
4207 dl 1.137 /**
4208     * Returns the result of accumulating the given transformation
4209     * of all entries using the given reducer to combine values,
4210     * and the given basis as an identity value.
4211     *
4212     * @param transformer a function returning the transformation
4213     * for an element
4214     * @param basis the identity (initial default value) for the reduction
4215     * @param reducer a commutative associative combining function
4216     * @return the result of accumulating the given transformation
4217     * of all entries
4218     */
4219     public int reduceEntriesToInt(ObjectToInt<Map.Entry<K,V>> transformer,
4220     int basis,
4221     IntByIntToInt reducer) {
4222     return ForkJoinTasks.reduceEntriesToInt
4223     (this, transformer, basis, reducer).invoke();
4224 dl 1.119 }
4225    
4226     // ---------------------------------------------------------------------
4227    
4228     /**
4229     * Predefined tasks for performing bulk parallel operations on
4230     * ConcurrentHashMaps. These tasks follow the forms and rules used
4231 dl 1.137 * for bulk operations. Each method has the same name as its
4232     * map counterpart, but returns a task rather than invoking it.
     * These methods may be useful in
4233     * custom applications such as submitting a task without waiting
4234     * for completion, using a custom pool, or combining with other
4235     * tasks.
4236 dl 1.119 */
4237     public static class ForkJoinTasks {
4238     private ForkJoinTasks() {}
4239    
4240     /**
4241     * Returns a task that when invoked, performs the given
4242     * action for each (key, value)
4243     *
4244     * @param map the map
4245     * @param action the action
4246     * @return the task
4247     */
4248 jsr166 1.120 public static <K,V> ForkJoinTask<Void> forEach
4249 dl 1.119 (ConcurrentHashMap<K,V> map,
4250     BiAction<K,V> action) {
4251     if (action == null) throw new NullPointerException();
4252 dl 1.137 return new ForEachMappingTask<K,V>(map, null, -1, null, action);
4253 dl 1.119 }
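        /* A minimal usage sketch, assuming a ConcurrentHashMap<String,Long>
         * named "map" and a BiAction<String,Long> named "printAction": build
         * the task without running it, then invoke it in a caller-chosen
         * ForkJoinPool instead of calling map.forEach directly.
         *
         *   ForkJoinTask<Void> task = ForkJoinTasks.forEach(map, printAction);
         *   new ForkJoinPool().invoke(task);
         */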
4254    
4255     /**
4256     * Returns a task that when invoked, performs the given
4257     * action for each non-null transformation of each (key, value)
4258     *
4259     * @param map the map
4260     * @param transformer a function returning the transformation
4261 jsr166 1.135 * for an element, or null if there is no transformation (in
4262 jsr166 1.134 * which case the action is not applied)
4263 dl 1.119 * @param action the action
4264     * @return the task
4265     */
4266 jsr166 1.120 public static <K,V,U> ForkJoinTask<Void> forEach
4267 dl 1.119 (ConcurrentHashMap<K,V> map,
4268     BiFun<? super K, ? super V, ? extends U> transformer,
4269     Action<U> action) {
4270     if (transformer == null || action == null)
4271     throw new NullPointerException();
4272     return new ForEachTransformedMappingTask<K,V,U>
4273 dl 1.137 (map, null, -1, null, transformer, action);
4274 dl 1.119 }
4275    
4276     /**
4277 dl 1.126 * Returns a task that when invoked, returns a non-null result
4278     * from applying the given search function on each (key,
4279     * value), or null if none. Upon success, further element
4280     * processing is suppressed and the results of any other
4281     * parallel invocations of the search function are ignored.
4282 dl 1.119 *
4283     * @param map the map
4284     * @param searchFunction a function returning a non-null
4285     * result on success, else null
4286     * @return the task
4287     */
4288     public static <K,V,U> ForkJoinTask<U> search
4289     (ConcurrentHashMap<K,V> map,
4290     BiFun<? super K, ? super V, ? extends U> searchFunction) {
4291     if (searchFunction == null) throw new NullPointerException();
4292     return new SearchMappingsTask<K,V,U>
4293 dl 1.137 (map, null, -1, null, searchFunction,
4294 dl 1.119 new AtomicReference<U>());
4295     }
4296    
4297     /**
4298     * Returns a task that when invoked, returns the result of
4299     * accumulating the given transformation of all (key, value) pairs
4300     * using the given reducer to combine values, or null if none.
4301     *
4302     * @param map the map
4303     * @param transformer a function returning the transformation
4304 jsr166 1.135 * for an element, or null if there is no transformation (in
4305 dl 1.119 * which case it is not combined).
4306     * @param reducer a commutative associative combining function
4307     * @return the task
4308     */
4309     public static <K,V,U> ForkJoinTask<U> reduce
4310     (ConcurrentHashMap<K,V> map,
4311     BiFun<? super K, ? super V, ? extends U> transformer,
4312     BiFun<? super U, ? super U, ? extends U> reducer) {
4313     if (transformer == null || reducer == null)
4314     throw new NullPointerException();
4315     return new MapReduceMappingsTask<K,V,U>
4316 dl 1.130 (map, null, -1, null, transformer, reducer);
4317 dl 1.119 }
4318    
4319     /**
4320     * Returns a task that when invoked, returns the result of
4321     * accumulating the given transformation of all (key, value) pairs
4322     * using the given reducer to combine values, and the given
4323     * basis as an identity value.
4324     *
4325     * @param map the map
4326     * @param transformer a function returning the transformation
4327     * for an element
4328     * @param basis the identity (initial default value) for the reduction
4329     * @param reducer a commutative associative combining function
4330     * @return the task
4331     */
4332     public static <K,V> ForkJoinTask<Double> reduceToDouble
4333     (ConcurrentHashMap<K,V> map,
4334     ObjectByObjectToDouble<? super K, ? super V> transformer,
4335     double basis,
4336     DoubleByDoubleToDouble reducer) {
4337     if (transformer == null || reducer == null)
4338     throw new NullPointerException();
4339     return new MapReduceMappingsToDoubleTask<K,V>
4340 dl 1.130 (map, null, -1, null, transformer, basis, reducer);
4341 dl 1.119 }
4342    
4343     /**
4344     * Returns a task that when invoked, returns the result of
4345     * accumulating the given transformation of all (key, value) pairs
4346     * using the given reducer to combine values, and the given
4347     * basis as an identity value.
4348     *
4349     * @param map the map
4350     * @param transformer a function returning the transformation
4351     * for an element
4352     * @param basis the identity (initial default value) for the reduction
4353     * @param reducer a commutative associative combining function
4354     * @return the task
4355     */
4356     public static <K,V> ForkJoinTask<Long> reduceToLong
4357     (ConcurrentHashMap<K,V> map,
4358     ObjectByObjectToLong<? super K, ? super V> transformer,
4359     long basis,
4360     LongByLongToLong reducer) {
4361     if (transformer == null || reducer == null)
4362     throw new NullPointerException();
4363     return new MapReduceMappingsToLongTask<K,V>
4364 dl 1.130 (map, null, -1, null, transformer, basis, reducer);
4365 dl 1.119 }
4366    
4367     /**
4368     * Returns a task that when invoked, returns the result of
4369     * accumulating the given transformation of all (key, value) pairs
4370     * using the given reducer to combine values, and the given
4371     * basis as an identity value.
4372     *
     * @param map the map
4373     * @param transformer a function returning the transformation
4374     * for an element
4375     * @param basis the identity (initial default value) for the reduction
4376     * @param reducer a commutative associative combining function
4377     * @return the task
4378     */
4379     public static <K,V> ForkJoinTask<Integer> reduceToInt
4380     (ConcurrentHashMap<K,V> map,
4381     ObjectByObjectToInt<? super K, ? super V> transformer,
4382     int basis,
4383     IntByIntToInt reducer) {
4384     if (transformer == null || reducer == null)
4385     throw new NullPointerException();
4386     return new MapReduceMappingsToIntTask<K,V>
4387 dl 1.130 (map, null, -1, null, transformer, basis, reducer);
4388 dl 1.119 }
4389    
4390     /**
4391     * Returns a task that when invoked, performs the given action
4392 jsr166 1.123 * for each key.
4393 dl 1.119 *
4394     * @param map the map
4395     * @param action the action
4396     * @return the task
4397     */
4398 jsr166 1.120 public static <K,V> ForkJoinTask<Void> forEachKey
4399 dl 1.119 (ConcurrentHashMap<K,V> map,
4400     Action<K> action) {
4401     if (action == null) throw new NullPointerException();
4402 dl 1.137 return new ForEachKeyTask<K,V>(map, null, -1, null, action);
4403 dl 1.119 }
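    // Illustrative usage sketch (editorial addition, not part of the original
    // source): forking the per-key traversal and waiting for it to complete,
    // given a hypothetical ConcurrentHashMap<String,Integer> named scores.
    //
    //   forEachKey(scores, new Action<String>() {
    //       public void apply(String k) { System.out.println(k); }
    //   }).invoke();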
4404    
4405     /**
4406     * Returns a task that when invoked, performs the given action
4407 jsr166 1.123 * for each non-null transformation of each key.
4408 dl 1.119 *
4409     * @param map the map
4410     * @param transformer a function returning the transformation
4411 jsr166 1.135 * for an element, or null if there is no transformation (in
4412 jsr166 1.134 * which case the action is not applied)
4413 dl 1.119 * @param action the action
4414     * @return the task
4415     */
4416 jsr166 1.120 public static <K,V,U> ForkJoinTask<Void> forEachKey
4417 dl 1.119 (ConcurrentHashMap<K,V> map,
4418     Fun<? super K, ? extends U> transformer,
4419     Action<U> action) {
4420     if (transformer == null || action == null)
4421     throw new NullPointerException();
4422     return new ForEachTransformedKeyTask<K,V,U>
4423 dl 1.137 (map, null, -1, null, transformer, action);
4424 dl 1.119 }
4425    
4426     /**
4427     * Returns a task that when invoked, returns a non-null result
4428     * from applying the given search function on each key, or
4429 dl 1.126 * null if none. Upon success, further element processing is
4430     * suppressed and the results of any other parallel
4431     * invocations of the search function are ignored.
4432 dl 1.119 *
4433     * @param map the map
4434     * @param searchFunction a function returning a non-null
4435     * result on success, else null
4436     * @return the task
4437     */
4438     public static <K,V,U> ForkJoinTask<U> searchKeys
4439     (ConcurrentHashMap<K,V> map,
4440     Fun<? super K, ? extends U> searchFunction) {
4441     if (searchFunction == null) throw new NullPointerException();
4442     return new SearchKeysTask<K,V,U>
4443 dl 1.137 (map, null, -1, null, searchFunction,
4444 dl 1.119 new AtomicReference<U>());
4445     }
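    // Illustrative usage sketch (editorial addition, not part of the original
    // source): a parallel search over keys that stops early once any parallel
    // invocation of the search function returns a non-null value. The map
    // variable and the key prefix are assumptions for illustration.
    //
    //   ForkJoinTask<String> probe = searchKeys(scores,
    //       new Fun<String,String>() {
    //           public String apply(String k) { return k.startsWith("user:") ? k : null; }
    //       });
    //   String anyMatch = probe.invoke();   // null if no key matched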
4446    
4447     /**
4448     * Returns a task that when invoked, returns the result of
4449     * accumulating all keys using the given reducer to combine
4450     * values, or null if none.
4451     *
4452     * @param map the map
4453     * @param reducer a commutative associative combining function
4454     * @return the task
4455     */
4456     public static <K,V> ForkJoinTask<K> reduceKeys
4457     (ConcurrentHashMap<K,V> map,
4458     BiFun<? super K, ? super K, ? extends K> reducer) {
4459     if (reducer == null) throw new NullPointerException();
4460     return new ReduceKeysTask<K,V>
4461 dl 1.130 (map, null, -1, null, reducer);
4462 dl 1.119 }
4463 jsr166 1.125
4464 dl 1.119 /**
4465     * Returns a task that when invoked, returns the result of
4466     * accumulating the given transformation of all keys using the given
4467     * reducer to combine values, or null if none.
4468     *
4469     * @param map the map
4470     * @param transformer a function returning the transformation
4471 jsr166 1.135 * for an element, or null if there is no transformation (in
4472 dl 1.119 * which case it is not combined).
4473     * @param reducer a commutative associative combining function
4474     * @return the task
4475     */
4476     public static <K,V,U> ForkJoinTask<U> reduceKeys
4477     (ConcurrentHashMap<K,V> map,
4478     Fun<? super K, ? extends U> transformer,
4479     BiFun<? super U, ? super U, ? extends U> reducer) {
4480     if (transformer == null || reducer == null)
4481     throw new NullPointerException();
4482     return new MapReduceKeysTask<K,V,U>
4483 dl 1.130 (map, null, -1, null, transformer, reducer);
4484 dl 1.119 }
4485    
4486     /**
4487     * Returns a task that when invoked, returns the result of
4488     * accumulating the given transformation of all keys using the given
4489     * reducer to combine values, and the given basis as an
4490     * identity value.
4491     *
4492     * @param map the map
4493     * @param transformer a function returning the transformation
4494     * for an element
4495     * @param basis the identity (initial default value) for the reduction
4496     * @param reducer a commutative associative combining function
4497     * @return the task
4498     */
4499     public static <K,V> ForkJoinTask<Double> reduceKeysToDouble
4500     (ConcurrentHashMap<K,V> map,
4501     ObjectToDouble<? super K> transformer,
4502     double basis,
4503     DoubleByDoubleToDouble reducer) {
4504     if (transformer == null || reducer == null)
4505     throw new NullPointerException();
4506     return new MapReduceKeysToDoubleTask<K,V>
4507 dl 1.130 (map, null, -1, null, transformer, basis, reducer);
4508 dl 1.119 }
4509    
4510     /**
4511     * Returns a task that when invoked, returns the result of
4512     * accumulating the given transformation of all keys using the given
4513     * reducer to combine values, and the given basis as an
4514     * identity value.
4515     *
4516     * @param map the map
4517     * @param transformer a function returning the transformation
4518     * for an element
4519     * @param basis the identity (initial default value) for the reduction
4520     * @param reducer a commutative associative combining function
4521     * @return the task
4522     */
4523     public static <K,V> ForkJoinTask<Long> reduceKeysToLong
4524     (ConcurrentHashMap<K,V> map,
4525     ObjectToLong<? super K> transformer,
4526     long basis,
4527     LongByLongToLong reducer) {
4528     if (transformer == null || reducer == null)
4529     throw new NullPointerException();
4530     return new MapReduceKeysToLongTask<K,V>
4531 dl 1.130 (map, null, -1, null, transformer, basis, reducer);
4532 dl 1.119 }
4533    
4534     /**
4535     * Returns a task that when invoked, returns the result of
4536     * accumulating the given transformation of all keys using the given
4537     * reducer to combine values, and the given basis as an
4538     * identity value.
4539     *
4540     * @param map the map
4541     * @param transformer a function returning the transformation
4542     * for an element
4543     * @param basis the identity (initial default value) for the reduction
4544     * @param reducer a commutative associative combining function
4545     * @return the task
4546     */
4547     public static <K,V> ForkJoinTask<Integer> reduceKeysToInt
4548     (ConcurrentHashMap<K,V> map,
4549     ObjectToInt<? super K> transformer,
4550     int basis,
4551     IntByIntToInt reducer) {
4552     if (transformer == null || reducer == null)
4553     throw new NullPointerException();
4554     return new MapReduceKeysToIntTask<K,V>
4555 dl 1.130 (map, null, -1, null, transformer, basis, reducer);
4556 dl 1.119 }
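    // Illustrative usage sketch (editorial addition, not part of the original
    // source): reducing keys to a primitive int with 0 as the identity basis,
    // given a hypothetical ConcurrentHashMap<String,Integer> named scores. It
    // assumes ObjectToInt and IntByIntToInt expose the same single apply method
    // used by the other function interfaces in this file.
    //
    //   ForkJoinTask<Integer> lengths = reduceKeysToInt(scores,
    //       new ObjectToInt<String>() {
    //           public int apply(String k) { return k.length(); }
    //       },
    //       0,
    //       new IntByIntToInt() {
    //           public int apply(int a, int b) { return a + b; }
    //       });
    //   int totalKeyLength = lengths.invoke();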
4557    
4558     /**
4559     * Returns a task that when invoked, performs the given action
4560 jsr166 1.123 * for each value.
4561 dl 1.119 *
4562     * @param map the map
4563     * @param action the action
     * @return the task
4564     */
4565 jsr166 1.120 public static <K,V> ForkJoinTask<Void> forEachValue
4566 dl 1.119 (ConcurrentHashMap<K,V> map,
4567     Action<V> action) {
4568     if (action == null) throw new NullPointerException();
4569 dl 1.137 return new ForEachValueTask<K,V>(map, null, -1, null, action);
4570 dl 1.119 }
4571    
4572     /**
4573     * Returns a task that when invoked, performs the given action
4574 jsr166 1.123 * for each non-null transformation of each value.
4575 dl 1.119 *
4576     * @param map the map
4577     * @param transformer a function returning the transformation
4578 jsr166 1.135 * for an element, or null if there is no transformation (in
4579 jsr166 1.134 * which case the action is not applied)
4580 dl 1.119 * @param action the action
     * @return the task
4581     */
4582 jsr166 1.120 public static <K,V,U> ForkJoinTask<Void> forEachValue
4583 dl 1.119 (ConcurrentHashMap<K,V> map,
4584     Fun<? super V, ? extends U> transformer,
4585     Action<U> action) {
4586     if (transformer == null || action == null)
4587     throw new NullPointerException();
4588     return new ForEachTransformedValueTask<K,V,U>
4589 dl 1.137 (map, null, -1, null, transformer, action);
4590 dl 1.119 }
4591    
4592     /**
4593     * Returns a task that when invoked, returns a non-null result
4594     * from applying the given search function on each value, or
4595 dl 1.126 * null if none. Upon success, further element processing is
4596     * suppressed and the results of any other parallel
4597     * invocations of the search function are ignored.
4598 dl 1.119 *
4599     * @param map the map
4600     * @param searchFunction a function returning a non-null
4601     * result on success, else null
4602     * @return the task
4603     */
4604     public static <K,V,U> ForkJoinTask<U> searchValues
4605     (ConcurrentHashMap<K,V> map,
4606     Fun<? super V, ? extends U> searchFunction) {
4607     if (searchFunction == null) throw new NullPointerException();
4608     return new SearchValuesTask<K,V,U>
4609 dl 1.137 (map, null, -1, null, searchFunction,
4610 dl 1.119 new AtomicReference<U>());
4611     }
4612    
4613     /**
4614     * Returns a task that when invoked, returns the result of
4615     * accumulating all values using the given reducer to combine
4616     * values, or null if none.
4617     *
4618     * @param map the map
4619     * @param reducer a commutative associative combining function
4620     * @return the task
4621     */
4622     public static <K,V> ForkJoinTask<V> reduceValues
4623     (ConcurrentHashMap<K,V> map,
4624     BiFun<? super V, ? super V, ? extends V> reducer) {
4625     if (reducer == null) throw new NullPointerException();
4626     return new ReduceValuesTask<K,V>
4627 dl 1.130 (map, null, -1, null, reducer);
4628 dl 1.119 }
4629    
4630     /**
4631     * Returns a task that when invoked, returns the result of
4632     * accumulating the given transformation of all values using the
4633     * given reducer to combine values, or null if none.
4634     *
4635     * @param map the map
4636     * @param transformer a function returning the transformation
4637 jsr166 1.135 * for an element, or null if there is no transformation (in
4638 dl 1.119 * which case it is not combined).
4639     * @param reducer a commutative associative combining function
4640     * @return the task
4641     */
4642     public static <K,V,U> ForkJoinTask<U> reduceValues
4643     (ConcurrentHashMap<K,V> map,
4644     Fun<? super V, ? extends U> transformer,
4645     BiFun<? super U, ? super U, ? extends U> reducer) {
4646     if (transformer == null || reducer == null)
4647     throw new NullPointerException();
4648     return new MapReduceValuesTask<K,V,U>
4649 dl 1.130 (map, null, -1, null, transformer, reducer);
4650 dl 1.119 }
4651    
4652     /**
4653     * Returns a task that when invoked, returns the result of
4654     * accumulating the given transformation of all values using the
4655     * given reducer to combine values, and the given basis as an
4656     * identity value.
4657     *
4658     * @param map the map
4659     * @param transformer a function returning the transformation
4660     * for an element
4661     * @param basis the identity (initial default value) for the reduction
4662     * @param reducer a commutative associative combining function
4663     * @return the task
4664     */
4665     public static <K,V> ForkJoinTask<Double> reduceValuesToDouble
4666     (ConcurrentHashMap<K,V> map,
4667     ObjectToDouble<? super V> transformer,
4668     double basis,
4669     DoubleByDoubleToDouble reducer) {
4670     if (transformer == null || reducer == null)
4671     throw new NullPointerException();
4672     return new MapReduceValuesToDoubleTask<K,V>
4673 dl 1.130 (map, null, -1, null, transformer, basis, reducer);
4674 dl 1.119 }
4675    
4676     /**
4677     * Returns a task that when invoked, returns the result of
4678     * accumulating the given transformation of all values using the
4679     * given reducer to combine values, and the given basis as an
4680     * identity value.
4681     *
4682     * @param map the map
4683     * @param transformer a function returning the transformation
4684     * for an element
4685     * @param basis the identity (initial default value) for the reduction
4686     * @param reducer a commutative associative combining function
4687     * @return the task
4688     */
4689     public static <K,V> ForkJoinTask<Long> reduceValuesToLong
4690     (ConcurrentHashMap<K,V> map,
4691     ObjectToLong<? super V> transformer,
4692     long basis,
4693     LongByLongToLong reducer) {
4694     if (transformer == null || reducer == null)
4695     throw new NullPointerException();
4696     return new MapReduceValuesToLongTask<K,V>
4697 dl 1.130 (map, null, -1, null, transformer, basis, reducer);
4698 dl 1.119 }
4699    
4700     /**
4701     * Returns a task that when invoked, returns the result of
4702     * accumulating the given transformation of all values using the
4703     * given reducer to combine values, and the given basis as an
4704     * identity value.
4705     *
4706     * @param map the map
4707     * @param transformer a function returning the transformation
4708     * for an element
4709     * @param basis the identity (initial default value) for the reduction
4710     * @param reducer a commutative associative combining function
4711     * @return the task
4712     */
4713     public static <K,V> ForkJoinTask<Integer> reduceValuesToInt
4714     (ConcurrentHashMap<K,V> map,
4715     ObjectToInt<? super V> transformer,
4716     int basis,
4717     IntByIntToInt reducer) {
4718     if (transformer == null || reducer == null)
4719     throw new NullPointerException();
4720     return new MapReduceValuesToIntTask<K,V>
4721 dl 1.130 (map, null, -1, null, transformer, basis, reducer);
4722 dl 1.119 }
4723    
4724     /**
4725     * Returns a task that when invoked, performs the given action
4726 jsr166 1.123 * for each entry.
4727 dl 1.119 *
4728     * @param map the map
4729     * @param action the action
     * @return the task
4730     */
4731 jsr166 1.120 public static <K,V> ForkJoinTask<Void> forEachEntry
4732 dl 1.119 (ConcurrentHashMap<K,V> map,
4733     Action<Map.Entry<K,V>> action) {
4734     if (action == null) throw new NullPointerException();
4735 dl 1.137 return new ForEachEntryTask<K,V>(map, null, -1, null, action);
4736 dl 1.119 }
4737    
4738     /**
4739     * Returns a task that when invoked, performs the given action
4740 jsr166 1.123 * for each non-null transformation of each entry.
4741 dl 1.119 *
4742     * @param map the map
4743     * @param transformer a function returning the transformation
4744 jsr166 1.135 * for an element, or null if there is no transformation (in
4745 jsr166 1.134 * which case the action is not applied)
4746 dl 1.119 * @param action the action
     * @return the task
4747     */
4748 jsr166 1.120 public static <K,V,U> ForkJoinTask<Void> forEachEntry
4749 dl 1.119 (ConcurrentHashMap<K,V> map,
4750     Fun<Map.Entry<K,V>, ? extends U> transformer,
4751     Action<U> action) {
4752     if (transformer == null || action == null)
4753     throw new NullPointerException();
4754     return new ForEachTransformedEntryTask<K,V,U>
4755 dl 1.137 (map, null, -1, null, transformer, action);
4756 dl 1.119 }
4757    
4758     /**
4759     * Returns a task that when invoked, returns a non-null result
4760     * from applying the given search function on each entry, or
4761 dl 1.126 * null if none. Upon success, further element processing is
4762     * suppressed and the results of any other parallel
4763     * invocations of the search function are ignored.
4764 dl 1.119 *
4765     * @param map the map
4766     * @param searchFunction a function returning a non-null
4767     * result on success, else null
4768     * @return the task
4769     */
4770     public static <K,V,U> ForkJoinTask<U> searchEntries
4771     (ConcurrentHashMap<K,V> map,
4772     Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
4773     if (searchFunction == null) throw new NullPointerException();
4774     return new SearchEntriesTask<K,V,U>
4775 dl 1.137 (map, null, -1, null, searchFunction,
4776 dl 1.119 new AtomicReference<U>());
4777     }
4778    
4779     /**
4780     * Returns a task that when invoked, returns the result of
4781     * accumulating all entries using the given reducer to combine
4782     * values, or null if none.
4783     *
4784     * @param map the map
4785     * @param reducer a commutative associative combining function
4786     * @return the task
4787     */
4788     public static <K,V> ForkJoinTask<Map.Entry<K,V>> reduceEntries
4789     (ConcurrentHashMap<K,V> map,
4790     BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4791     if (reducer == null) throw new NullPointerException();
4792     return new ReduceEntriesTask<K,V>
4793 dl 1.130 (map, null, -1, null, reducer);
4794 dl 1.119 }
4795    
4796     /**
4797     * Returns a task that when invoked, returns the result of
4798     * accumulating the given transformation of all entries using the
4799     * given reducer to combine values, or null if none.
4800     *
4801     * @param map the map
4802     * @param transformer a function returning the transformation
4803 jsr166 1.135 * for an element, or null if there is no transformation (in
4804 dl 1.119 * which case it is not combined).
4805     * @param reducer a commutative associative combining function
4806     * @return the task
4807     */
4808     public static <K,V,U> ForkJoinTask<U> reduceEntries
4809     (ConcurrentHashMap<K,V> map,
4810     Fun<Map.Entry<K,V>, ? extends U> transformer,
4811     BiFun<? super U, ? super U, ? extends U> reducer) {
4812     if (transformer == null || reducer == null)
4813     throw new NullPointerException();
4814     return new MapReduceEntriesTask<K,V,U>
4815 dl 1.130 (map, null, -1, null, transformer, reducer);
4816 dl 1.119 }
4817    
4818     /**
4819     * Returns a task that when invoked, returns the result of
4820     * accumulating the given transformation of all entries using the
4821     * given reducer to combine values, and the given basis as an
4822     * identity value.
4823     *
4824     * @param map the map
4825     * @param transformer a function returning the transformation
4826     * for an element
4827     * @param basis the identity (initial default value) for the reduction
4828     * @param reducer a commutative associative combining function
4829     * @return the task
4830     */
4831     public static <K,V> ForkJoinTask<Double> reduceEntriesToDouble
4832     (ConcurrentHashMap<K,V> map,
4833     ObjectToDouble<Map.Entry<K,V>> transformer,
4834     double basis,
4835     DoubleByDoubleToDouble reducer) {
4836     if (transformer == null || reducer == null)
4837     throw new NullPointerException();
4838     return new MapReduceEntriesToDoubleTask<K,V>
4839 dl 1.130 (map, null, -1, null, transformer, basis, reducer);
4840 dl 1.119 }
4841    
4842     /**
4843     * Returns a task that when invoked, returns the result of
4844     * accumulating the given transformation of all entries using the
4845     * given reducer to combine values, and the given basis as an
4846     * identity value.
4847     *
4848     * @param map the map
4849     * @param transformer a function returning the transformation
4850     * for an element
4851     * @param basis the identity (initial default value) for the reduction
4852     * @param reducer a commutative associative combining function
4853     * @return the task
4854     */
4855     public static <K,V> ForkJoinTask<Long> reduceEntriesToLong
4856     (ConcurrentHashMap<K,V> map,
4857     ObjectToLong<Map.Entry<K,V>> transformer,
4858     long basis,
4859     LongByLongToLong reducer) {
4860     if (transformer == null || reducer == null)
4861     throw new NullPointerException();
4862     return new MapReduceEntriesToLongTask<K,V>
4863 dl 1.130 (map, null, -1, null, transformer, basis, reducer);
4864 dl 1.119 }
4865    
4866     /**
4867     * Returns a task that when invoked, returns the result of
4868     * accumulating the given transformation of all entries using the
4869     * given reducer to combine values, and the given basis as an
4870     * identity value.
4871     *
4872     * @param map the map
4873     * @param transformer a function returning the transformation
4874     * for an element
4875     * @param basis the identity (initial default value) for the reduction
4876     * @param reducer a commutative associative combining function
4877     * @return the task
4878     */
4879     public static <K,V> ForkJoinTask<Integer> reduceEntriesToInt
4880     (ConcurrentHashMap<K,V> map,
4881     ObjectToInt<Map.Entry<K,V>> transformer,
4882     int basis,
4883     IntByIntToInt reducer) {
4884     if (transformer == null || reducer == null)
4885     throw new NullPointerException();
4886     return new MapReduceEntriesToIntTask<K,V>
4887 dl 1.130 (map, null, -1, null, transformer, basis, reducer);
4888 dl 1.119 }
4889     }
4890    
4891     // -------------------------------------------------------
4892    
4893     /**
4894     * Base for FJ tasks for bulk operations. This adds a variant of
4895 jsr166 1.122 * CountedCompleters and some split and merge bookkeeping to
4896 dl 1.119 * iterator functionality. The forEach and reduce methods are
4897     * similar to those illustrated in CountedCompleter documentation,
4898     * except that bottom-up reduction completions perform them within
4899     * their compute methods. The search methods are like forEach
4900     * except they continually poll for success and exit early. Also,
4901     * exceptions are handled in a simpler manner, by just trying to
4902     * complete root task exceptionally.
4903     */
4904 dl 1.128 @SuppressWarnings("serial") static abstract class BulkTask<K,V,R> extends Traverser<K,V,R> {
4905 dl 1.119 final BulkTask<K,V,?> parent; // completion target
4906 dl 1.130 int batch; // split control; -1 for unknown
4907 dl 1.119 int pending; // completion control
4908    
4909 jsr166 1.131 BulkTask(ConcurrentHashMap<K,V> map, BulkTask<K,V,?> parent,
4910 dl 1.130 int batch) {
4911 dl 1.119 super(map);
4912     this.parent = parent;
4913     this.batch = batch;
4914 dl 1.130 if (parent != null && map != null) { // split parent
4915     Node[] t;
4916     if ((t = parent.tab) == null &&
4917     (t = parent.tab = map.table) != null)
4918     parent.baseLimit = parent.baseSize = t.length;
4919     this.tab = t;
4920     this.baseSize = parent.baseSize;
4921     int hi = this.baseLimit = parent.baseLimit;
4922     parent.baseLimit = this.index = this.baseIndex =
4923     (hi + parent.baseIndex + 1) >>> 1;
4924     }
4925 dl 1.119 }
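        // Editorial worked example (not part of the original source): if the
        // parent currently covers table indices [0, 16) (baseIndex = 0,
        // baseLimit = 16), the assignment above computes (16 + 0 + 1) >>> 1 = 8,
        // so the new right-hand task takes [8, 16) while the parent keeps [0, 8).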
4926    
4927     // FJ methods
4928    
4929     /**
4930 jsr166 1.123 * Propagates completion. Note that all reduce actions
4931 dl 1.119 * bypass this method to combine while completing.
4932     */
4933     final void tryComplete() {
4934     BulkTask<K,V,?> a = this, s = a;
4935     for (int c;;) {
4936     if ((c = a.pending) == 0) {
4937     if ((a = (s = a).parent) == null) {
4938     s.quietlyComplete();
4939     break;
4940     }
4941     }
4942     else if (U.compareAndSwapInt(a, PENDING, c, c - 1))
4943     break;
4944     }
4945     }
4946    
4947     /**
4948 dl 1.128 * Forces root task to complete.
4949     * @param ex if null, complete normally, else exceptionally
4950     * @return false to simplify use
4951 dl 1.119 */
4952 dl 1.128 final boolean tryCompleteComputation(Throwable ex) {
4953 dl 1.119 for (BulkTask<K,V,?> a = this;;) {
4954     BulkTask<K,V,?> p = a.parent;
4955     if (p == null) {
4956 dl 1.128 if (ex != null)
4957     a.completeExceptionally(ex);
4958     else
4959     a.quietlyComplete();
4960     return false;
4961 dl 1.119 }
4962     a = p;
4963     }
4964     }
4965    
4966 dl 1.128 /**
4967     * Version of tryCompleteComputation for function screening checks
4968     */
4969     final boolean abortOnNullFunction() {
4970     * Version of tryCompleteComputation for function screening checks.
4971 dl 1.119 }
4972    
4973     // utilities
4974    
4975     /** CompareAndSet pending count */
4976     final boolean casPending(int cmp, int val) {
4977     return U.compareAndSwapInt(this, PENDING, cmp, val);
4978     }
4979    
4980     /**
4981 jsr166 1.123 * Returns approximately 2^x, where x is the number of times
4982 dl 1.119 * (minus one) the task is split in two before executing the
4983     * leaf action. This value is faster to compute and more
4984     * convenient to use as a guide to splitting than the depth is,
4985     * since it is used while dividing by two anyway.
4986     */
4987     final int batch() {
4988 dl 1.137 ConcurrentHashMap<K, V> m; int b; Node[] t; ForkJoinPool pool;
4989 dl 1.130 if ((b = batch) < 0 && (m = map) != null) { // force initialization
4990     if ((t = tab) == null && (t = tab = m.table) != null)
4991     baseLimit = baseSize = t.length;
4992     if (t != null) {
4993     long n = m.counter.sum();
4994 dl 1.137 int par = (pool = getPool()) == null ?
4995     ForkJoinPool.getCommonPoolParallelism() :
4996     pool.getParallelism();
4997     int sp = par << 3; // slack of 8
4998 dl 1.130 b = batch = (n <= 0L) ? 0 : (n < (long)sp) ? (int)n : sp;
4999     }
5000 dl 1.119 }
5001     return b;
5002     }
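        // Editorial worked example (not part of the original source), using the
        // arithmetic above: with a pool parallelism of 4, sp = 4 << 3 = 32; a map
        // reporting 10 mappings then yields batch = 10, while one reporting
        // 100000 mappings yields batch = 32. Since each fork in the exec()
        // methods below halves b until it reaches 1, the traversal splits into
        // at most roughly sp leaf tasks, i.e. about 8 per worker.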
5003    
5004     /**
5005 jsr166 1.123 * Returns exportable snapshot entry.
5006 dl 1.119 */
5007     static <K,V> AbstractMap.SimpleEntry<K,V> entryFor(K k, V v) {
5008 dl 1.126 return new AbstractMap.SimpleEntry<K,V>(k, v);
5009 dl 1.119 }
5010    
5011     // Unsafe mechanics
5012     private static final sun.misc.Unsafe U;
5013     private static final long PENDING;
5014     static {
5015     try {
5016     U = sun.misc.Unsafe.getUnsafe();
5017     PENDING = U.objectFieldOffset
5018     (BulkTask.class.getDeclaredField("pending"));
5019     } catch (Exception e) {
5020     throw new Error(e);
5021     }
5022     }
5023     }
5024    
5025     /*
5026     * Task classes. Coded in a regular but ugly format/style to
5027     * simplify checks that each variant differs in the right way from
5028     * others.
5029     */
5030    
5031 dl 1.128 @SuppressWarnings("serial") static final class ForEachKeyTask<K,V>
5032 dl 1.119 extends BulkTask<K,V,Void> {
5033     final Action<K> action;
5034 dl 1.137 ForEachKeyTask<K,V> nextRight;
5035 dl 1.119 ForEachKeyTask
5036 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5037 dl 1.137 ForEachKeyTask<K,V> nextRight,
5038 dl 1.119 Action<K> action) {
5039 dl 1.130 super(m, p, b);
5040 dl 1.137 this.nextRight = nextRight;
5041 dl 1.119 this.action = action;
5042     }
5043 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5044 dl 1.119 final Action<K> action = this.action;
5045     if (action == null)
5046 dl 1.128 return abortOnNullFunction();
5047 dl 1.137 ForEachKeyTask<K,V> rights = null;
5048 dl 1.128 try {
5049     int b = batch(), c;
5050     while (b > 1 && baseIndex != baseLimit) {
5051     do {} while (!casPending(c = pending, c+1));
5052 dl 1.137 (rights = new ForEachKeyTask<K,V>
5053     (map, this, b >>>= 1, rights, action)).fork();
5054 dl 1.128 }
5055     while (advance() != null)
5056     action.apply((K)nextKey);
5057     tryComplete();
5058     } catch (Throwable ex) {
5059     return tryCompleteComputation(ex);
5060 dl 1.119 }
5061 dl 1.137 while (rights != null && rights.tryUnfork()) {
5062     rights.exec();
5063     rights = rights.nextRight;
5064     }
5065 dl 1.128 return false;
5066 dl 1.119 }
5067     }
5068    
5069 dl 1.128 @SuppressWarnings("serial") static final class ForEachValueTask<K,V>
5070 dl 1.119 extends BulkTask<K,V,Void> {
5071 dl 1.137 ForEachValueTask<K,V> nextRight;
5072 dl 1.119 final Action<V> action;
5073     ForEachValueTask
5074 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5075 dl 1.137 ForEachValueTask<K,V> nextRight,
5076 dl 1.119 Action<V> action) {
5077 dl 1.130 super(m, p, b);
5078 dl 1.137 this.nextRight = nextRight;
5079 dl 1.119 this.action = action;
5080     }
5081 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5082 dl 1.119 final Action<V> action = this.action;
5083     if (action == null)
5084 dl 1.128 return abortOnNullFunction();
5085 dl 1.137 ForEachValueTask<K,V> rights = null;
5086 dl 1.128 try {
5087     int b = batch(), c;
5088     while (b > 1 && baseIndex != baseLimit) {
5089     do {} while (!casPending(c = pending, c+1));
5090 dl 1.137 (rights = new ForEachValueTask<K,V>
5091     (map, this, b >>>= 1, rights, action)).fork();
5092 dl 1.128 }
5093     Object v;
5094     while ((v = advance()) != null)
5095     action.apply((V)v);
5096     tryComplete();
5097     } catch (Throwable ex) {
5098     return tryCompleteComputation(ex);
5099 dl 1.119 }
5100 dl 1.137 while (rights != null && rights.tryUnfork()) {
5101     rights.exec();
5102     rights = rights.nextRight;
5103     }
5104 dl 1.128 return false;
5105 dl 1.119 }
5106     }
5107    
5108 dl 1.128 @SuppressWarnings("serial") static final class ForEachEntryTask<K,V>
5109 dl 1.119 extends BulkTask<K,V,Void> {
5110 dl 1.137 ForEachEntryTask<K,V> nextRight;
5111 dl 1.119 final Action<Entry<K,V>> action;
5112     ForEachEntryTask
5113 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5114 dl 1.137 ForEachEntryTask<K,V> nextRight,
5115 dl 1.119 Action<Entry<K,V>> action) {
5116 dl 1.130 super(m, p, b);
5117 dl 1.137 this.nextRight = nextRight;
5118 dl 1.119 this.action = action;
5119     }
5120 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5121 dl 1.119 final Action<Entry<K,V>> action = this.action;
5122     if (action == null)
5123 dl 1.128 return abortOnNullFunction();
5124 dl 1.137 ForEachEntryTask<K,V> rights = null;
5125 dl 1.128 try {
5126     int b = batch(), c;
5127     while (b > 1 && baseIndex != baseLimit) {
5128     do {} while (!casPending(c = pending, c+1));
5129 dl 1.137 (rights = new ForEachEntryTask<K,V>
5130     (map, this, b >>>= 1, rights, action)).fork();
5131 dl 1.128 }
5132     Object v;
5133     while ((v = advance()) != null)
5134     action.apply(entryFor((K)nextKey, (V)v));
5135     tryComplete();
5136     } catch (Throwable ex) {
5137     return tryCompleteComputation(ex);
5138 dl 1.119 }
5139 dl 1.137 while (rights != null && rights.tryUnfork()) {
5140     rights.exec();
5141     rights = rights.nextRight;
5142     }
5143 dl 1.128 return false;
5144 dl 1.119 }
5145     }
5146    
5147 dl 1.128 @SuppressWarnings("serial") static final class ForEachMappingTask<K,V>
5148 dl 1.119 extends BulkTask<K,V,Void> {
5149 dl 1.137 ForEachMappingTask<K,V> nextRight;
5150 dl 1.119 final BiAction<K,V> action;
5151     ForEachMappingTask
5152 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5153 dl 1.137 ForEachMappingTask<K,V> nextRight,
5154 dl 1.119 BiAction<K,V> action) {
5155 dl 1.130 super(m, p, b);
5156 dl 1.137 this.nextRight = nextRight;
5157 dl 1.119 this.action = action;
5158     }
5159 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5160 dl 1.119 final BiAction<K,V> action = this.action;
5161     if (action == null)
5162 dl 1.128 return abortOnNullFunction();
5163 dl 1.137 ForEachMappingTask<K,V> rights = null;
5164 dl 1.128 try {
5165     int b = batch(), c;
5166     while (b > 1 && baseIndex != baseLimit) {
5167     do {} while (!casPending(c = pending, c+1));
5168 dl 1.137 (rights = new ForEachMappingTask<K,V>
5169     (map, this, b >>>= 1, rights, action)).fork();
5170 dl 1.128 }
5171     Object v;
5172     while ((v = advance()) != null)
5173     action.apply((K)nextKey, (V)v);
5174     tryComplete();
5175     } catch (Throwable ex) {
5176     return tryCompleteComputation(ex);
5177 dl 1.119 }
5178 dl 1.137 while (rights != null && rights.tryUnfork()) {
5179     rights.exec();
5180     rights = rights.nextRight;
5181     }
5182 dl 1.128 return false;
5183 dl 1.119 }
5184     }
5185    
5186 dl 1.128 @SuppressWarnings("serial") static final class ForEachTransformedKeyTask<K,V,U>
5187 dl 1.119 extends BulkTask<K,V,Void> {
5188 dl 1.137 ForEachTransformedKeyTask<K,V,U> nextRight;
5189 dl 1.119 final Fun<? super K, ? extends U> transformer;
5190     final Action<U> action;
5191     ForEachTransformedKeyTask
5192 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5193 dl 1.137 ForEachTransformedKeyTask<K,V,U> nextRight,
5194 dl 1.119 Fun<? super K, ? extends U> transformer,
5195     Action<U> action) {
5196 dl 1.130 super(m, p, b);
5197 dl 1.137 this.nextRight = nextRight;
5198 dl 1.119 this.transformer = transformer;
5199     this.action = action;
5200    
5201     }
5202 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5203 dl 1.119 final Fun<? super K, ? extends U> transformer =
5204     this.transformer;
5205     final Action<U> action = this.action;
5206     if (transformer == null || action == null)
5207 dl 1.128 return abortOnNullFunction();
5208 dl 1.137 ForEachTransformedKeyTask<K,V,U> rights = null;
5209 dl 1.128 try {
5210     int b = batch(), c;
5211     while (b > 1 && baseIndex != baseLimit) {
5212     do {} while (!casPending(c = pending, c+1));
5213 dl 1.137 (rights = new ForEachTransformedKeyTask<K,V,U>
5214     (map, this, b >>>= 1, rights, transformer, action)).fork();
5215 dl 1.128 }
5216     U u;
5217     while (advance() != null) {
5218     if ((u = transformer.apply((K)nextKey)) != null)
5219     action.apply(u);
5220     }
5221     tryComplete();
5222     } catch (Throwable ex) {
5223     return tryCompleteComputation(ex);
5224 dl 1.119 }
5225 dl 1.137 while (rights != null && rights.tryUnfork()) {
5226     rights.exec();
5227     rights = rights.nextRight;
5228     }
5229 dl 1.128 return false;
5230 dl 1.119 }
5231     }
5232    
5233 dl 1.128 @SuppressWarnings("serial") static final class ForEachTransformedValueTask<K,V,U>
5234 dl 1.119 extends BulkTask<K,V,Void> {
5235 dl 1.137 ForEachTransformedValueTask<K,V,U> nextRight;
5236 dl 1.119 final Fun<? super V, ? extends U> transformer;
5237     final Action<U> action;
5238     ForEachTransformedValueTask
5239 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5240 dl 1.137 ForEachTransformedValueTask<K,V,U> nextRight,
5241 dl 1.119 Fun<? super V, ? extends U> transformer,
5242     Action<U> action) {
5243 dl 1.130 super(m, p, b);
5244 dl 1.137 this.nextRight = nextRight;
5245 dl 1.119 this.transformer = transformer;
5246     this.action = action;
5247    
5248     }
5249 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5250 dl 1.119 final Fun<? super V, ? extends U> transformer =
5251     this.transformer;
5252     final Action<U> action = this.action;
5253     if (transformer == null || action == null)
5254 dl 1.128 return abortOnNullFunction();
5255 dl 1.137 ForEachTransformedValueTask<K,V,U> rights = null;
5256 dl 1.128 try {
5257     int b = batch(), c;
5258     while (b > 1 && baseIndex != baseLimit) {
5259     do {} while (!casPending(c = pending, c+1));
5260 dl 1.137 (rights = new ForEachTransformedValueTask<K,V,U>
5261     (map, this, b >>>= 1, rights, transformer, action)).fork();
5262 dl 1.128 }
5263     Object v; U u;
5264     while ((v = advance()) != null) {
5265     if ((u = transformer.apply((V)v)) != null)
5266     action.apply(u);
5267     }
5268     tryComplete();
5269     } catch (Throwable ex) {
5270     return tryCompleteComputation(ex);
5271 dl 1.119 }
5272 dl 1.137 while (rights != null && rights.tryUnfork()) {
5273     rights.exec();
5274     rights = rights.nextRight;
5275     }
5276 dl 1.128 return false;
5277 dl 1.119 }
5278 tim 1.1 }
5279    
5280 dl 1.128 @SuppressWarnings("serial") static final class ForEachTransformedEntryTask<K,V,U>
5281 dl 1.119 extends BulkTask<K,V,Void> {
5282 dl 1.137 ForEachTransformedEntryTask<K,V,U> nextRight;
5283 dl 1.119 final Fun<Map.Entry<K,V>, ? extends U> transformer;
5284     final Action<U> action;
5285     ForEachTransformedEntryTask
5286 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5287 dl 1.137 ForEachTransformedEntryTask<K,V,U> nextRight,
5288 dl 1.119 Fun<Map.Entry<K,V>, ? extends U> transformer,
5289     Action<U> action) {
5290 dl 1.130 super(m, p, b);
5291 dl 1.137 this.nextRight = nextRight;
5292 dl 1.119 this.transformer = transformer;
5293     this.action = action;
5294    
5295     }
5296 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5297 dl 1.119 final Fun<Map.Entry<K,V>, ? extends U> transformer =
5298     this.transformer;
5299     final Action<U> action = this.action;
5300     if (transformer == null || action == null)
5301 dl 1.128 return abortOnNullFunction();
5302 dl 1.137 ForEachTransformedEntryTask<K,V,U> rights = null;
5303 dl 1.128 try {
5304     int b = batch(), c;
5305     while (b > 1 && baseIndex != baseLimit) {
5306     do {} while (!casPending(c = pending, c+1));
5307 dl 1.137 (rights = new ForEachTransformedEntryTask<K,V,U>
5308     (map, this, b >>>= 1, rights, transformer, action)).fork();
5309 dl 1.128 }
5310     Object v; U u;
5311     while ((v = advance()) != null) {
5312     if ((u = transformer.apply(entryFor((K)nextKey, (V)v))) != null)
5313     action.apply(u);
5314     }
5315     tryComplete();
5316     } catch (Throwable ex) {
5317     return tryCompleteComputation(ex);
5318 dl 1.119 }
5319 dl 1.137 while (rights != null && rights.tryUnfork()) {
5320     rights.exec();
5321     rights = rights.nextRight;
5322     }
5323 dl 1.128 return false;
5324 dl 1.119 }
5325 tim 1.1 }
5326    
5327 dl 1.128 @SuppressWarnings("serial") static final class ForEachTransformedMappingTask<K,V,U>
5328 dl 1.119 extends BulkTask<K,V,Void> {
5329 dl 1.137 ForEachTransformedMappingTask<K,V,U> nextRight;
5330 dl 1.119 final BiFun<? super K, ? super V, ? extends U> transformer;
5331     final Action<U> action;
5332     ForEachTransformedMappingTask
5333 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5334 dl 1.137 ForEachTransformedMappingTask<K,V,U> nextRight,
5335 dl 1.119 BiFun<? super K, ? super V, ? extends U> transformer,
5336     Action<U> action) {
5337 dl 1.130 super(m, p, b);
5338 dl 1.137 this.nextRight = nextRight;
5339 dl 1.119 this.transformer = transformer;
5340     this.action = action;
5341    
5342     }
5343 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5344 dl 1.119 final BiFun<? super K, ? super V, ? extends U> transformer =
5345     this.transformer;
5346     final Action<U> action = this.action;
5347     if (transformer == null || action == null)
5348 dl 1.128 return abortOnNullFunction();
5349 dl 1.137 ForEachTransformedMappingTask<K,V,U> rights = null;
5350 dl 1.128 try {
5351     int b = batch(), c;
5352     while (b > 1 && baseIndex != baseLimit) {
5353     do {} while (!casPending(c = pending, c+1));
5354 dl 1.137 (rights = new ForEachTransformedMappingTask<K,V,U>
5355     (map, this, b >>>= 1, rights, transformer, action)).fork();
5356 dl 1.128 }
5357     Object v; U u;
5358     while ((v = advance()) != null) {
5359     if ((u = transformer.apply((K)nextKey, (V)v)) != null)
5360     action.apply(u);
5361     }
5362     tryComplete();
5363     } catch (Throwable ex) {
5364     return tryCompleteComputation(ex);
5365 dl 1.119 }
5366 dl 1.137 while (rights != null && rights.tryUnfork()) {
5367     rights.exec();
5368     rights = rights.nextRight;
5369     }
5370 dl 1.128 return false;
5371 dl 1.119 }
5372 tim 1.1 }
5373    
5374 dl 1.128 @SuppressWarnings("serial") static final class SearchKeysTask<K,V,U>
5375 dl 1.119 extends BulkTask<K,V,U> {
5376 dl 1.137 SearchKeysTask<K,V,U> nextRight;
5377 dl 1.119 final Fun<? super K, ? extends U> searchFunction;
5378     final AtomicReference<U> result;
5379     SearchKeysTask
5380 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5381 dl 1.137 SearchKeysTask<K,V,U> nextRight,
5382 dl 1.119 Fun<? super K, ? extends U> searchFunction,
5383     AtomicReference<U> result) {
5384 dl 1.130 super(m, p, b);
5385 dl 1.137 this.nextRight = nextRight;
5386 dl 1.119 this.searchFunction = searchFunction; this.result = result;
5387     }
5388 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5389 dl 1.119 AtomicReference<U> result = this.result;
5390     final Fun<? super K, ? extends U> searchFunction =
5391     this.searchFunction;
5392     if (searchFunction == null || result == null)
5393 dl 1.128 return abortOnNullFunction();
5394 dl 1.137 SearchKeysTask<K,V,U> rights = null;
5395 dl 1.128 try {
5396     int b = batch(), c;
5397     while (b > 1 && baseIndex != baseLimit && result.get() == null) {
5398     do {} while (!casPending(c = pending, c+1));
5399 dl 1.137 (rights = new SearchKeysTask<K,V,U>
5400     (map, this, b >>>= 1, rights, searchFunction, result)).fork();
5401 dl 1.128 }
5402     U u;
5403     while (result.get() == null && advance() != null) {
5404     if ((u = searchFunction.apply((K)nextKey)) != null) {
5405     if (result.compareAndSet(null, u))
5406     tryCompleteComputation(null);
5407     break;
5408 dl 1.126 }
5409 dl 1.119 }
5410 dl 1.128 tryComplete();
5411     } catch (Throwable ex) {
5412     return tryCompleteComputation(ex);
5413 dl 1.119 }
5414 dl 1.137 while (rights != null && result.get() == null && rights.tryUnfork()) {
5415     rights.exec();
5416     rights = rights.nextRight;
5417     }
5418 dl 1.128 return false;
5419 dl 1.119 }
5420     public final U getRawResult() { return result.get(); }
5421 tim 1.1 }
5422    
5423 dl 1.128 @SuppressWarnings("serial") static final class SearchValuesTask<K,V,U>
5424 dl 1.119 extends BulkTask<K,V,U> {
5425 dl 1.137 SearchValuesTask<K,V,U> nextRight;
5426 dl 1.119 final Fun<? super V, ? extends U> searchFunction;
5427     final AtomicReference<U> result;
5428     SearchValuesTask
5429 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5430 dl 1.137 SearchValuesTask<K,V,U> nextRight,
5431 dl 1.119 Fun<? super V, ? extends U> searchFunction,
5432     AtomicReference<U> result) {
5433 dl 1.130 super(m, p, b);
5434 dl 1.137 this.nextRight = nextRight;
5435 dl 1.119 this.searchFunction = searchFunction; this.result = result;
5436     }
5437 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5438 dl 1.119 AtomicReference<U> result = this.result;
5439     final Fun<? super V, ? extends U> searchFunction =
5440     this.searchFunction;
5441     if (searchFunction == null || result == null)
5442 dl 1.128 return abortOnNullFunction();
5443 dl 1.137 SearchValuesTask<K,V,U> rights = null;
5444 dl 1.128 try {
5445     int b = batch(), c;
5446     while (b > 1 && baseIndex != baseLimit && result.get() == null) {
5447     do {} while (!casPending(c = pending, c+1));
5448 dl 1.137 (rights = new SearchValuesTask<K,V,U>
5449     (map, this, b >>>= 1, rights, searchFunction, result)).fork();
5450 dl 1.128 }
5451     Object v; U u;
5452     while (result.get() == null && (v = advance()) != null) {
5453     if ((u = searchFunction.apply((V)v)) != null) {
5454     if (result.compareAndSet(null, u))
5455     tryCompleteComputation(null);
5456     break;
5457 dl 1.126 }
5458 dl 1.119 }
5459 dl 1.128 tryComplete();
5460     } catch (Throwable ex) {
5461     return tryCompleteComputation(ex);
5462 dl 1.119 }
5463 dl 1.137 while (rights != null && result.get() == null && rights.tryUnfork()) {
5464     rights.exec();
5465     rights = rights.nextRight;
5466     }
5467 dl 1.128 return false;
5468 dl 1.119 }
5469     public final U getRawResult() { return result.get(); }
5470     }
5471 tim 1.11
5472 dl 1.128 @SuppressWarnings("serial") static final class SearchEntriesTask<K,V,U>
5473 dl 1.119 extends BulkTask<K,V,U> {
5474 dl 1.137 SearchEntriesTask<K,V,U> nextRight;
5475 dl 1.119 final Fun<Entry<K,V>, ? extends U> searchFunction;
5476     final AtomicReference<U> result;
5477     SearchEntriesTask
5478 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5479 dl 1.137 SearchEntriesTask<K,V,U> nextRight,
5480 dl 1.119 Fun<Entry<K,V>, ? extends U> searchFunction,
5481     AtomicReference<U> result) {
5482 dl 1.130 super(m, p, b);
5483 dl 1.137 this.nextRight = nextRight;
5484 dl 1.119 this.searchFunction = searchFunction; this.result = result;
5485     }
5486 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5487 dl 1.119 AtomicReference<U> result = this.result;
5488     final Fun<Entry<K,V>, ? extends U> searchFunction =
5489     this.searchFunction;
5490     if (searchFunction == null || result == null)
5491 dl 1.128 return abortOnNullFunction();
5492 dl 1.137 SearchEntriesTask<K,V,U> rights = null;
5493 dl 1.128 try {
5494     int b = batch(), c;
5495     while (b > 1 && baseIndex != baseLimit && result.get() == null) {
5496     do {} while (!casPending(c = pending, c+1));
5497 dl 1.137 (rights = new SearchEntriesTask<K,V,U>
5498     (map, this, b >>>= 1, rights, searchFunction, result)).fork();
5499 dl 1.128 }
5500     Object v; U u;
5501     while (result.get() == null && (v = advance()) != null) {
5502     if ((u = searchFunction.apply(entryFor((K)nextKey, (V)v))) != null) {
5503     if (result.compareAndSet(null, u))
5504     tryCompleteComputation(null);
5505     break;
5506 dl 1.126 }
5507 dl 1.119 }
5508 dl 1.128 tryComplete();
5509     } catch (Throwable ex) {
5510     return tryCompleteComputation(ex);
5511 dl 1.119 }
5512 dl 1.137 while (rights != null && result.get() == null && rights.tryUnfork()) {
5513     rights.exec();
5514     rights = rights.nextRight;
5515     }
5516 dl 1.128 return false;
5517 dl 1.119 }
5518     public final U getRawResult() { return result.get(); }
5519     }
5520 tim 1.1
5521 dl 1.128 @SuppressWarnings("serial") static final class SearchMappingsTask<K,V,U>
5522 dl 1.119 extends BulkTask<K,V,U> {
5523 dl 1.137 SearchMappingsTask<K,V,U> nextRight;
5524 dl 1.119 final BiFun<? super K, ? super V, ? extends U> searchFunction;
5525     final AtomicReference<U> result;
5526     SearchMappingsTask
5527 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5528 dl 1.137 SearchMappingsTask<K,V,U> nextRight,
5529 dl 1.119 BiFun<? super K, ? super V, ? extends U> searchFunction,
5530     AtomicReference<U> result) {
5531 dl 1.130 super(m, p, b);
5532 dl 1.137 this.nextRight = nextRight;
5533 dl 1.119 this.searchFunction = searchFunction; this.result = result;
5534     }
5535 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5536 dl 1.119 AtomicReference<U> result = this.result;
5537     final BiFun<? super K, ? super V, ? extends U> searchFunction =
5538     this.searchFunction;
5539     if (searchFunction == null || result == null)
5540 dl 1.128 return abortOnNullFunction();
5541 dl 1.137 SearchMappingsTask<K,V,U> rights = null;
5542 dl 1.128 try {
5543     int b = batch(), c;
5544     while (b > 1 && baseIndex != baseLimit && result.get() == null) {
5545     do {} while (!casPending(c = pending, c+1));
5546 dl 1.137 (rights = new SearchMappingsTask<K,V,U>
5547     (map, this, b >>>= 1, rights, searchFunction, result)).fork();
5548 dl 1.128 }
5549     Object v; U u;
5550     while (result.get() == null && (v = advance()) != null) {
5551     if ((u = searchFunction.apply((K)nextKey, (V)v)) != null) {
5552     if (result.compareAndSet(null, u))
5553     tryCompleteComputation(null);
5554     break;
5555 dl 1.126 }
5556 dl 1.119 }
5557 dl 1.128 tryComplete();
5558     } catch (Throwable ex) {
5559     return tryCompleteComputation(ex);
5560 dl 1.119 }
5561 dl 1.137 while (rights != null && result.get() == null && rights.tryUnfork()) {
5562     rights.exec();
5563     rights = rights.nextRight;
5564     }
5565 dl 1.128 return false;
5566 tim 1.1 }
5567 dl 1.119 public final U getRawResult() { return result.get(); }
5568     }
5569 tim 1.1
5570 dl 1.128 @SuppressWarnings("serial") static final class ReduceKeysTask<K,V>
5571 dl 1.119 extends BulkTask<K,V,K> {
5572     final BiFun<? super K, ? super K, ? extends K> reducer;
5573     K result;
5574 dl 1.128 ReduceKeysTask<K,V> rights, nextRight;
5575 dl 1.119 ReduceKeysTask
5576 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5577 dl 1.128 ReduceKeysTask<K,V> nextRight,
5578 dl 1.119 BiFun<? super K, ? super K, ? extends K> reducer) {
5579 dl 1.130 super(m, p, b); this.nextRight = nextRight;
5580 dl 1.119 this.reducer = reducer;
5581     }
5582 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5583 dl 1.119 final BiFun<? super K, ? super K, ? extends K> reducer =
5584     this.reducer;
5585     if (reducer == null)
5586 dl 1.128 return abortOnNullFunction();
5587     try {
5588     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5589     do {} while (!casPending(c = pending, c+1));
5590     (rights = new ReduceKeysTask<K,V>
5591 dl 1.130 (map, this, b >>>= 1, rights, reducer)).fork();
5592 dl 1.128 }
5593     K r = null;
5594     while (advance() != null) {
5595     K u = (K)nextKey;
5596     r = (r == null) ? u : reducer.apply(r, u);
5597 dl 1.99 }
5598 dl 1.128 result = r;
5599     for (ReduceKeysTask<K,V> t = this, s;;) {
5600     int c; BulkTask<K,V,?> par; K tr, sr;
5601     if ((c = t.pending) == 0) {
5602     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5603     if ((sr = s.result) != null)
5604 jsr166 1.132 t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
5605 dl 1.128 }
5606     if ((par = t.parent) == null ||
5607     !(par instanceof ReduceKeysTask)) {
5608     t.quietlyComplete();
5609     break;
5610     }
5611     t = (ReduceKeysTask<K,V>)par;
5612     }
5613     else if (t.casPending(c, c - 1))
5614     break;
5615 tim 1.1 }
5616 dl 1.128 } catch (Throwable ex) {
5617     return tryCompleteComputation(ex);
5618 tim 1.1 }
5619 dl 1.137 for (ReduceKeysTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
5620     s.exec();
5621 dl 1.128 return false;
5622 tim 1.1 }
5623 dl 1.119 public final K getRawResult() { return result; }
5624     }
5625 tim 1.1
5626 dl 1.128 @SuppressWarnings("serial") static final class ReduceValuesTask<K,V>
5627 dl 1.119 extends BulkTask<K,V,V> {
5628     final BiFun<? super V, ? super V, ? extends V> reducer;
5629     V result;
5630 dl 1.128 ReduceValuesTask<K,V> rights, nextRight;
5631 dl 1.119 ReduceValuesTask
5632 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5633 dl 1.128 ReduceValuesTask<K,V> nextRight,
5634 dl 1.119 BiFun<? super V, ? super V, ? extends V> reducer) {
5635 dl 1.130 super(m, p, b); this.nextRight = nextRight;
5636 dl 1.119 this.reducer = reducer;
5637     }
5638 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5639 dl 1.119 final BiFun<? super V, ? super V, ? extends V> reducer =
5640     this.reducer;
5641     if (reducer == null)
5642 dl 1.128 return abortOnNullFunction();
5643     try {
5644     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5645     do {} while (!casPending(c = pending, c+1));
5646     (rights = new ReduceValuesTask<K,V>
5647 dl 1.130 (map, this, b >>>= 1, rights, reducer)).fork();
5648 dl 1.128 }
5649     V r = null;
5650     Object v;
5651     while ((v = advance()) != null) {
5652     V u = (V)v;
5653     r = (r == null) ? u : reducer.apply(r, u);
5654 dl 1.119 }
5655 dl 1.128 result = r;
5656     for (ReduceValuesTask<K,V> t = this, s;;) {
5657     int c; BulkTask<K,V,?> par; V tr, sr;
5658     if ((c = t.pending) == 0) {
5659     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5660     if ((sr = s.result) != null)
5661 jsr166 1.132 t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
5662 dl 1.128 }
5663     if ((par = t.parent) == null ||
5664     !(par instanceof ReduceValuesTask)) {
5665     t.quietlyComplete();
5666     break;
5667     }
5668     t = (ReduceValuesTask<K,V>)par;
5669     }
5670     else if (t.casPending(c, c - 1))
5671     break;
5672 dl 1.119 }
5673 dl 1.128 } catch (Throwable ex) {
5674     return tryCompleteComputation(ex);
5675 dl 1.119 }
5676 dl 1.137 for (ReduceValuesTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
5677     s.exec();
5678 dl 1.128 return false;
5679 tim 1.1 }
5680 dl 1.119 public final V getRawResult() { return result; }
5681     }
5682 tim 1.1
5683 dl 1.128 @SuppressWarnings("serial") static final class ReduceEntriesTask<K,V>
5684 dl 1.119 extends BulkTask<K,V,Map.Entry<K,V>> {
5685     final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5686     Map.Entry<K,V> result;
5687 dl 1.128 ReduceEntriesTask<K,V> rights, nextRight;
5688 dl 1.119 ReduceEntriesTask
5689 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5690     ReduceEntriesTask<K,V> nextRight,
5691 dl 1.119 BiFun<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5692 dl 1.130 super(m, p, b); this.nextRight = nextRight;
5693 dl 1.119 this.reducer = reducer;
5694     }
5695 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5696 dl 1.119 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer =
5697     this.reducer;
5698     if (reducer == null)
5699 dl 1.128 return abortOnNullFunction();
5700     try {
5701     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5702     do {} while (!casPending(c = pending, c+1));
5703     (rights = new ReduceEntriesTask<K,V>
5704 dl 1.130 (map, this, b >>>= 1, rights, reducer)).fork();
5705 dl 1.128 }
5706     Map.Entry<K,V> r = null;
5707     Object v;
5708     while ((v = advance()) != null) {
5709     Map.Entry<K,V> u = entryFor((K)nextKey, (V)v);
5710     r = (r == null) ? u : reducer.apply(r, u);
5711 dl 1.119 }
5712 dl 1.128 result = r;
5713     for (ReduceEntriesTask<K,V> t = this, s;;) {
5714     int c; BulkTask<K,V,?> par; Map.Entry<K,V> tr, sr;
5715     if ((c = t.pending) == 0) {
5716     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5717     if ((sr = s.result) != null)
5718 jsr166 1.132 t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
5719 dl 1.128 }
5720     if ((par = t.parent) == null ||
5721     !(par instanceof ReduceEntriesTask)) {
5722     t.quietlyComplete();
5723     break;
5724     }
5725     t = (ReduceEntriesTask<K,V>)par;
5726     }
5727     else if (t.casPending(c, c - 1))
5728     break;
5729 dl 1.119 }
5730 dl 1.128 } catch (Throwable ex) {
5731     return tryCompleteComputation(ex);
5732 dl 1.119 }
5733 dl 1.137 for (ReduceEntriesTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
5734     s.exec();
5735 dl 1.128 return false;
5736 dl 1.119 }
5737     public final Map.Entry<K,V> getRawResult() { return result; }
5738     }
5739 dl 1.99
5740 dl 1.128 @SuppressWarnings("serial") static final class MapReduceKeysTask<K,V,U>
5741 dl 1.119 extends BulkTask<K,V,U> {
5742     final Fun<? super K, ? extends U> transformer;
5743     final BiFun<? super U, ? super U, ? extends U> reducer;
5744     U result;
5745 dl 1.128 MapReduceKeysTask<K,V,U> rights, nextRight;
5746 dl 1.119 MapReduceKeysTask
5747 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5748 dl 1.128 MapReduceKeysTask<K,V,U> nextRight,
5749 dl 1.119 Fun<? super K, ? extends U> transformer,
5750     BiFun<? super U, ? super U, ? extends U> reducer) {
5751 dl 1.130 super(m, p, b); this.nextRight = nextRight;
5752 dl 1.119 this.transformer = transformer;
5753     this.reducer = reducer;
5754     }
5755 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5756 dl 1.119 final Fun<? super K, ? extends U> transformer =
5757     this.transformer;
5758     final BiFun<? super U, ? super U, ? extends U> reducer =
5759     this.reducer;
5760     if (transformer == null || reducer == null)
5761 dl 1.128 return abortOnNullFunction();
5762     try {
5763     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5764     do {} while (!casPending(c = pending, c+1));
5765     (rights = new MapReduceKeysTask<K,V,U>
5766 dl 1.130 (map, this, b >>>= 1, rights, transformer, reducer)).fork();
5767 dl 1.128 }
5768     U r = null, u;
5769     while (advance() != null) {
5770     if ((u = transformer.apply((K)nextKey)) != null)
5771     r = (r == null) ? u : reducer.apply(r, u);
5772 dl 1.119 }
5773 dl 1.128 result = r;
5774     for (MapReduceKeysTask<K,V,U> t = this, s;;) {
5775     int c; BulkTask<K,V,?> par; U tr, sr;
5776     if ((c = t.pending) == 0) {
5777     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5778     if ((sr = s.result) != null)
5779 jsr166 1.132 t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
5780 dl 1.128 }
5781     if ((par = t.parent) == null ||
5782     !(par instanceof MapReduceKeysTask)) {
5783     t.quietlyComplete();
5784     break;
5785     }
5786     t = (MapReduceKeysTask<K,V,U>)par;
5787     }
5788     else if (t.casPending(c, c - 1))
5789     break;
5790 dl 1.119 }
5791 dl 1.128 } catch (Throwable ex) {
5792     return tryCompleteComputation(ex);
5793 dl 1.119 }
5794 dl 1.137 for (MapReduceKeysTask<K,V,U> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
5795     s.exec();
5796 dl 1.128 return false;
5797 tim 1.1 }
5798 dl 1.119 public final U getRawResult() { return result; }
5799 dl 1.4 }
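    // Illustrative sketch: each exec() above halves its batch estimate (b >>>= 1), bumps the
    // pending count, and forks a subtask for the right half of the remaining traversal,
    // threading the forked subtasks onto a singly linked chain via nextRight. Roughly, in
    // pseudocode (Subtask, incrementPending and moreToTraverse are placeholders invented here):
    //
    //   while (batch > 1 && moreToTraverse()) {
    //       incrementPending();                                  // one more outstanding contributor
    //       batch >>>= 1;                                        // give away half of the work
    //       rights = new Subtask(map, this, batch, rights, ...); // prepend to the rights chain
    //       rights.fork();
    //   }
    //
    // Keeping the chain lets the task later fold in completed results and reclaim any
    // forked-but-unstolen subtasks with tryUnfork().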
5800    
5801 dl 1.128 @SuppressWarnings("serial") static final class MapReduceValuesTask<K,V,U>
5802 dl 1.119 extends BulkTask<K,V,U> {
5803     final Fun<? super V, ? extends U> transformer;
5804     final BiFun<? super U, ? super U, ? extends U> reducer;
5805     U result;
5806 dl 1.128 MapReduceValuesTask<K,V,U> rights, nextRight;
5807 dl 1.119 MapReduceValuesTask
5808 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5809 dl 1.128 MapReduceValuesTask<K,V,U> nextRight,
5810 dl 1.119 Fun<? super V, ? extends U> transformer,
5811     BiFun<? super U, ? super U, ? extends U> reducer) {
5812 dl 1.130 super(m, p, b); this.nextRight = nextRight;
5813 dl 1.119 this.transformer = transformer;
5814     this.reducer = reducer;
5815     }
5816 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5817 dl 1.119 final Fun<? super V, ? extends U> transformer =
5818     this.transformer;
5819     final BiFun<? super U, ? super U, ? extends U> reducer =
5820     this.reducer;
5821     if (transformer == null || reducer == null)
5822 dl 1.128 return abortOnNullFunction();
5823     try {
5824     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5825     do {} while (!casPending(c = pending, c+1));
5826     (rights = new MapReduceValuesTask<K,V,U>
5827 dl 1.130 (map, this, b >>>= 1, rights, transformer, reducer)).fork();
5828 dl 1.128 }
5829     U r = null, u;
5830     Object v;
5831     while ((v = advance()) != null) {
5832     if ((u = transformer.apply((V)v)) != null)
5833     r = (r == null) ? u : reducer.apply(r, u);
5834 dl 1.119 }
5835 dl 1.128 result = r;
5836     for (MapReduceValuesTask<K,V,U> t = this, s;;) {
5837     int c; BulkTask<K,V,?> par; U tr, sr;
5838     if ((c = t.pending) == 0) {
5839     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5840     if ((sr = s.result) != null)
5841 jsr166 1.132 t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
5842 dl 1.128 }
5843     if ((par = t.parent) == null ||
5844     !(par instanceof MapReduceValuesTask)) {
5845     t.quietlyComplete();
5846     break;
5847     }
5848     t = (MapReduceValuesTask<K,V,U>)par;
5849     }
5850     else if (t.casPending(c, c - 1))
5851     break;
5852 dl 1.119 }
5853 dl 1.128 } catch (Throwable ex) {
5854     return tryCompleteComputation(ex);
5855 dl 1.119 }
5856 dl 1.137 for (MapReduceValuesTask<K,V,U> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
5857     s.exec();
5858 dl 1.128 return false;
5859 dl 1.119 }
5860     public final U getRawResult() { return result; }
5861 dl 1.4 }
5862    
5863 dl 1.128 @SuppressWarnings("serial") static final class MapReduceEntriesTask<K,V,U>
5864 dl 1.119 extends BulkTask<K,V,U> {
5865     final Fun<Map.Entry<K,V>, ? extends U> transformer;
5866     final BiFun<? super U, ? super U, ? extends U> reducer;
5867     U result;
5868 dl 1.128 MapReduceEntriesTask<K,V,U> rights, nextRight;
5869 dl 1.119 MapReduceEntriesTask
5870 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5871 dl 1.128 MapReduceEntriesTask<K,V,U> nextRight,
5872 dl 1.119 Fun<Map.Entry<K,V>, ? extends U> transformer,
5873     BiFun<? super U, ? super U, ? extends U> reducer) {
5874 dl 1.130 super(m, p, b); this.nextRight = nextRight;
5875 dl 1.119 this.transformer = transformer;
5876     this.reducer = reducer;
5877     }
5878 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5879 dl 1.119 final Fun<Map.Entry<K,V>, ? extends U> transformer =
5880     this.transformer;
5881     final BiFun<? super U, ? super U, ? extends U> reducer =
5882     this.reducer;
5883     if (transformer == null || reducer == null)
5884 dl 1.128 return abortOnNullFunction();
5885     try {
5886     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5887     do {} while (!casPending(c = pending, c+1));
5888     (rights = new MapReduceEntriesTask<K,V,U>
5889 dl 1.130 (map, this, b >>>= 1, rights, transformer, reducer)).fork();
5890 dl 1.128 }
5891     U r = null, u;
5892     Object v;
5893     while ((v = advance()) != null) {
5894     if ((u = transformer.apply(entryFor((K)nextKey, (V)v))) != null)
5895     r = (r == null) ? u : reducer.apply(r, u);
5896 dl 1.119 }
5897 dl 1.128 result = r;
5898     for (MapReduceEntriesTask<K,V,U> t = this, s;;) {
5899     int c; BulkTask<K,V,?> par; U tr, sr;
5900     if ((c = t.pending) == 0) {
5901     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5902     if ((sr = s.result) != null)
5903 jsr166 1.132 t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
5904 dl 1.128 }
5905     if ((par = t.parent) == null ||
5906     !(par instanceof MapReduceEntriesTask)) {
5907     t.quietlyComplete();
5908     break;
5909     }
5910     t = (MapReduceEntriesTask<K,V,U>)par;
5911     }
5912     else if (t.casPending(c, c - 1))
5913     break;
5914 dl 1.119 }
5915 dl 1.128 } catch (Throwable ex) {
5916     return tryCompleteComputation(ex);
5917 dl 1.119 }
5918 dl 1.137 for (MapReduceEntriesTask<K,V,U> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
5919     s.exec();
5920 dl 1.128 return false;
5921 dl 1.119 }
5922     public final U getRawResult() { return result; }
5923 dl 1.4 }
5924 tim 1.1
5925 dl 1.128 @SuppressWarnings("serial") static final class MapReduceMappingsTask<K,V,U>
5926 dl 1.119 extends BulkTask<K,V,U> {
5927     final BiFun<? super K, ? super V, ? extends U> transformer;
5928     final BiFun<? super U, ? super U, ? extends U> reducer;
5929     U result;
5930 dl 1.128 MapReduceMappingsTask<K,V,U> rights, nextRight;
5931 dl 1.119 MapReduceMappingsTask
5932 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5933 dl 1.128 MapReduceMappingsTask<K,V,U> nextRight,
5934 dl 1.119 BiFun<? super K, ? super V, ? extends U> transformer,
5935     BiFun<? super U, ? super U, ? extends U> reducer) {
5936 dl 1.130 super(m, p, b); this.nextRight = nextRight;
5937 dl 1.119 this.transformer = transformer;
5938     this.reducer = reducer;
5939     }
5940 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
5941 dl 1.119 final BiFun<? super K, ? super V, ? extends U> transformer =
5942     this.transformer;
5943     final BiFun<? super U, ? super U, ? extends U> reducer =
5944     this.reducer;
5945     if (transformer == null || reducer == null)
5946 dl 1.128 return abortOnNullFunction();
5947     try {
5948     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5949     do {} while (!casPending(c = pending, c+1));
5950     (rights = new MapReduceMappingsTask<K,V,U>
5951 dl 1.130 (map, this, b >>>= 1, rights, transformer, reducer)).fork();
5952 dl 1.128 }
5953     U r = null, u;
5954     Object v;
5955     while ((v = advance()) != null) {
5956     if ((u = transformer.apply((K)nextKey, (V)v)) != null)
5957     r = (r == null) ? u : reducer.apply(r, u);
5958 dl 1.119 }
5959 dl 1.128 result = r;
5960     for (MapReduceMappingsTask<K,V,U> t = this, s;;) {
5961     int c; BulkTask<K,V,?> par; U tr, sr;
5962     if ((c = t.pending) == 0) {
5963     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5964     if ((sr = s.result) != null)
5965 jsr166 1.132 t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
5966 dl 1.128 }
5967     if ((par = t.parent) == null ||
5968     !(par instanceof MapReduceMappingsTask)) {
5969     t.quietlyComplete();
5970     break;
5971     }
5972     t = (MapReduceMappingsTask<K,V,U>)par;
5973     }
5974     else if (t.casPending(c, c - 1))
5975     break;
5976 dl 1.119 }
5977 dl 1.128 } catch (Throwable ex) {
5978     return tryCompleteComputation(ex);
5979 dl 1.119 }
5980 dl 1.137 for (MapReduceMappingsTask<K,V,U> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
5981     s.exec();
5982 dl 1.128 return false;
5983 dl 1.119 }
5984     public final U getRawResult() { return result; }
5985     }
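    // Illustrative note: in the MapReduce*Task classes a null return from the transformer means
    // "no contribution for this element", so only non-null mapped values reach the reducer.
    // A hypothetical sequential equivalent of the same semantics:
    //
    //   U r = null, u;
    //   for (Map.Entry<K,V> e : map.entrySet()) {
    //       if ((u = transformer.apply(e.getKey(), e.getValue())) != null)
    //           r = (r == null) ? u : reducer.apply(r, u);
    //   }
    //   // r remains null if no element produced a non-null mapped value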
5986 jsr166 1.114
5987 dl 1.128 @SuppressWarnings("serial") static final class MapReduceKeysToDoubleTask<K,V>
5988 dl 1.119 extends BulkTask<K,V,Double> {
5989     final ObjectToDouble<? super K> transformer;
5990     final DoubleByDoubleToDouble reducer;
5991     final double basis;
5992     double result;
5993 dl 1.128 MapReduceKeysToDoubleTask<K,V> rights, nextRight;
5994 dl 1.119 MapReduceKeysToDoubleTask
5995 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5996 dl 1.128 MapReduceKeysToDoubleTask<K,V> nextRight,
5997 dl 1.119 ObjectToDouble<? super K> transformer,
5998     double basis,
5999     DoubleByDoubleToDouble reducer) {
6000 dl 1.130 super(m, p, b); this.nextRight = nextRight;
6001 dl 1.119 this.transformer = transformer;
6002     this.basis = basis; this.reducer = reducer;
6003     }
6004 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
6005 dl 1.119 final ObjectToDouble<? super K> transformer =
6006     this.transformer;
6007     final DoubleByDoubleToDouble reducer = this.reducer;
6008     if (transformer == null || reducer == null)
6009 dl 1.128 return abortOnNullFunction();
6010     try {
6011     final double id = this.basis;
6012     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6013     do {} while (!casPending(c = pending, c+1));
6014     (rights = new MapReduceKeysToDoubleTask<K,V>
6015 dl 1.130 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6016 dl 1.128 }
6017     double r = id;
6018     while (advance() != null)
6019     r = reducer.apply(r, transformer.apply((K)nextKey));
6020     result = r;
6021     for (MapReduceKeysToDoubleTask<K,V> t = this, s;;) {
6022     int c; BulkTask<K,V,?> par;
6023     if ((c = t.pending) == 0) {
6024     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6025     t.result = reducer.apply(t.result, s.result);
6026     }
6027     if ((par = t.parent) == null ||
6028     !(par instanceof MapReduceKeysToDoubleTask)) {
6029     t.quietlyComplete();
6030     break;
6031     }
6032     t = (MapReduceKeysToDoubleTask<K,V>)par;
6033     }
6034     else if (t.casPending(c, c - 1))
6035     break;
6036 dl 1.119 }
6037 dl 1.128 } catch (Throwable ex) {
6038     return tryCompleteComputation(ex);
6039 dl 1.119 }
6040 dl 1.137 for (MapReduceKeysToDoubleTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6041     s.exec();
6042 dl 1.128 return false;
6043 dl 1.79 }
6044 dl 1.119 public final Double getRawResult() { return result; }
6045     }
6046 dl 1.79
6047 dl 1.128 @SuppressWarnings("serial") static final class MapReduceValuesToDoubleTask<K,V>
6048 dl 1.119 extends BulkTask<K,V,Double> {
6049     final ObjectToDouble<? super V> transformer;
6050     final DoubleByDoubleToDouble reducer;
6051     final double basis;
6052     double result;
6053 dl 1.128 MapReduceValuesToDoubleTask<K,V> rights, nextRight;
6054 dl 1.119 MapReduceValuesToDoubleTask
6055 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
6056 dl 1.128 MapReduceValuesToDoubleTask<K,V> nextRight,
6057 dl 1.119 ObjectToDouble<? super V> transformer,
6058     double basis,
6059     DoubleByDoubleToDouble reducer) {
6060 dl 1.130 super(m, p, b); this.nextRight = nextRight;
6061 dl 1.119 this.transformer = transformer;
6062     this.basis = basis; this.reducer = reducer;
6063     }
6064 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
6065 dl 1.119 final ObjectToDouble<? super V> transformer =
6066     this.transformer;
6067     final DoubleByDoubleToDouble reducer = this.reducer;
6068     if (transformer == null || reducer == null)
6069 dl 1.128 return abortOnNullFunction();
6070     try {
6071     final double id = this.basis;
6072     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6073     do {} while (!casPending(c = pending, c+1));
6074     (rights = new MapReduceValuesToDoubleTask<K,V>
6075 dl 1.130 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6076 dl 1.128 }
6077     double r = id;
6078     Object v;
6079     while ((v = advance()) != null)
6080     r = reducer.apply(r, transformer.apply((V)v));
6081     result = r;
6082     for (MapReduceValuesToDoubleTask<K,V> t = this, s;;) {
6083     int c; BulkTask<K,V,?> par;
6084     if ((c = t.pending) == 0) {
6085     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6086     t.result = reducer.apply(t.result, s.result);
6087     }
6088     if ((par = t.parent) == null ||
6089     !(par instanceof MapReduceValuesToDoubleTask)) {
6090     t.quietlyComplete();
6091     break;
6092     }
6093     t = (MapReduceValuesToDoubleTask<K,V>)par;
6094     }
6095     else if (t.casPending(c, c - 1))
6096     break;
6097 dl 1.119 }
6098 dl 1.128 } catch (Throwable ex) {
6099     return tryCompleteComputation(ex);
6100 dl 1.119 }
6101 dl 1.137 for (MapReduceValuesToDoubleTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6102     s.exec();
6103 dl 1.128 return false;
6104 dl 1.30 }
6105 dl 1.119 public final Double getRawResult() { return result; }
6106 dl 1.79 }
6107 dl 1.30
6108 dl 1.128 @SuppressWarnings("serial") static final class MapReduceEntriesToDoubleTask<K,V>
6109 dl 1.119 extends BulkTask<K,V,Double> {
6110     final ObjectToDouble<Map.Entry<K,V>> transformer;
6111     final DoubleByDoubleToDouble reducer;
6112     final double basis;
6113     double result;
6114 dl 1.128 MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
6115 dl 1.119 MapReduceEntriesToDoubleTask
6116 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
6117 dl 1.128 MapReduceEntriesToDoubleTask<K,V> nextRight,
6118 dl 1.119 ObjectToDouble<Map.Entry<K,V>> transformer,
6119     double basis,
6120     DoubleByDoubleToDouble reducer) {
6121 dl 1.130 super(m, p, b); this.nextRight = nextRight;
6122 dl 1.119 this.transformer = transformer;
6123     this.basis = basis; this.reducer = reducer;
6124     }
6125 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
6126 dl 1.119 final ObjectToDouble<Map.Entry<K,V>> transformer =
6127     this.transformer;
6128     final DoubleByDoubleToDouble reducer = this.reducer;
6129     if (transformer == null || reducer == null)
6130 dl 1.128 return abortOnNullFunction();
6131     try {
6132     final double id = this.basis;
6133     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6134     do {} while (!casPending(c = pending, c+1));
6135     (rights = new MapReduceEntriesToDoubleTask<K,V>
6136 dl 1.130 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6137 dl 1.128 }
6138     double r = id;
6139     Object v;
6140     while ((v = advance()) != null)
6141     r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
6142     result = r;
6143     for (MapReduceEntriesToDoubleTask<K,V> t = this, s;;) {
6144     int c; BulkTask<K,V,?> par;
6145     if ((c = t.pending) == 0) {
6146     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6147     t.result = reducer.apply(t.result, s.result);
6148     }
6149     if ((par = t.parent) == null ||
6150     !(par instanceof MapReduceEntriesToDoubleTask)) {
6151     t.quietlyComplete();
6152     break;
6153     }
6154     t = (MapReduceEntriesToDoubleTask<K,V>)par;
6155     }
6156     else if (t.casPending(c, c - 1))
6157     break;
6158 dl 1.119 }
6159 dl 1.128 } catch (Throwable ex) {
6160     return tryCompleteComputation(ex);
6161 dl 1.119 }
6162 dl 1.137 for (MapReduceEntriesToDoubleTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6163     s.exec();
6164 dl 1.128 return false;
6165 dl 1.30 }
6166 dl 1.119 public final Double getRawResult() { return result; }
6167 tim 1.1 }
6168    
6169 dl 1.128 @SuppressWarnings("serial") static final class MapReduceMappingsToDoubleTask<K,V>
6170 dl 1.119 extends BulkTask<K,V,Double> {
6171     final ObjectByObjectToDouble<? super K, ? super V> transformer;
6172     final DoubleByDoubleToDouble reducer;
6173     final double basis;
6174     double result;
6175 dl 1.128 MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
6176 dl 1.119 MapReduceMappingsToDoubleTask
6177 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
6178 dl 1.128 MapReduceMappingsToDoubleTask<K,V> nextRight,
6179 dl 1.119 ObjectByObjectToDouble<? super K, ? super V> transformer,
6180     double basis,
6181     DoubleByDoubleToDouble reducer) {
6182 dl 1.130 super(m, p, b); this.nextRight = nextRight;
6183 dl 1.119 this.transformer = transformer;
6184     this.basis = basis; this.reducer = reducer;
6185     }
6186 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
6187 dl 1.119 final ObjectByObjectToDouble<? super K, ? super V> transformer =
6188     this.transformer;
6189     final DoubleByDoubleToDouble reducer = this.reducer;
6190     if (transformer == null || reducer == null)
6191 dl 1.128 return abortOnNullFunction();
6192     try {
6193     final double id = this.basis;
6194     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6195     do {} while (!casPending(c = pending, c+1));
6196     (rights = new MapReduceMappingsToDoubleTask<K,V>
6197 dl 1.130 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6198 dl 1.128 }
6199     double r = id;
6200     Object v;
6201     while ((v = advance()) != null)
6202     r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6203     result = r;
6204     for (MapReduceMappingsToDoubleTask<K,V> t = this, s;;) {
6205     int c; BulkTask<K,V,?> par;
6206     if ((c = t.pending) == 0) {
6207     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6208     t.result = reducer.apply(t.result, s.result);
6209     }
6210     if ((par = t.parent) == null ||
6211     !(par instanceof MapReduceMappingsToDoubleTask)) {
6212     t.quietlyComplete();
6213     break;
6214     }
6215     t = (MapReduceMappingsToDoubleTask<K,V>)par;
6216     }
6217     else if (t.casPending(c, c - 1))
6218     break;
6219 dl 1.119 }
6220 dl 1.128 } catch (Throwable ex) {
6221     return tryCompleteComputation(ex);
6222 dl 1.119 }
6223 dl 1.137 for (MapReduceMappingsToDoubleTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6224     s.exec();
6225 dl 1.128 return false;
6226 dl 1.4 }
6227 dl 1.119 public final Double getRawResult() { return result; }
6228     }
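    // Illustrative note: the primitive-valued variants (*ToDouble, *ToLong, *ToInt) seed every
    // subtask's accumulation with the supplied basis, and subtask results are then combined with
    // the same reducer, so the basis should act as an identity for the reduction (for example 0
    // for a sum, or Double.MAX_VALUE for a minimum). A hypothetical sequential equivalent:
    //
    //   double r = basis;
    //   for (Map.Entry<K,V> e : map.entrySet())
    //       r = reducer.apply(r, transformer.apply(e.getKey(), e.getValue()));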
6229    
6230 dl 1.128 @SuppressWarnings("serial") static final class MapReduceKeysToLongTask<K,V>
6231 dl 1.119 extends BulkTask<K,V,Long> {
6232     final ObjectToLong<? super K> transformer;
6233     final LongByLongToLong reducer;
6234     final long basis;
6235     long result;
6236 dl 1.128 MapReduceKeysToLongTask<K,V> rights, nextRight;
6237 dl 1.119 MapReduceKeysToLongTask
6238 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
6239 dl 1.128 MapReduceKeysToLongTask<K,V> nextRight,
6240 dl 1.119 ObjectToLong<? super K> transformer,
6241     long basis,
6242     LongByLongToLong reducer) {
6243 dl 1.130 super(m, p, b); this.nextRight = nextRight;
6244 dl 1.119 this.transformer = transformer;
6245     this.basis = basis; this.reducer = reducer;
6246     }
6247 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
6248 dl 1.119 final ObjectToLong<? super K> transformer =
6249     this.transformer;
6250     final LongByLongToLong reducer = this.reducer;
6251     if (transformer == null || reducer == null)
6252 dl 1.128 return abortOnNullFunction();
6253     try {
6254     final long id = this.basis;
6255     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6256     do {} while (!casPending(c = pending, c+1));
6257     (rights = new MapReduceKeysToLongTask<K,V>
6258 dl 1.130 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6259 dl 1.128 }
6260     long r = id;
6261     while (advance() != null)
6262     r = reducer.apply(r, transformer.apply((K)nextKey));
6263     result = r;
6264     for (MapReduceKeysToLongTask<K,V> t = this, s;;) {
6265     int c; BulkTask<K,V,?> par;
6266     if ((c = t.pending) == 0) {
6267     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6268     t.result = reducer.apply(t.result, s.result);
6269     }
6270     if ((par = t.parent) == null ||
6271     !(par instanceof MapReduceKeysToLongTask)) {
6272     t.quietlyComplete();
6273     break;
6274     }
6275     t = (MapReduceKeysToLongTask<K,V>)par;
6276     }
6277     else if (t.casPending(c, c - 1))
6278     break;
6279 dl 1.119 }
6280 dl 1.128 } catch (Throwable ex) {
6281     return tryCompleteComputation(ex);
6282 dl 1.119 }
6283 dl 1.137 for (MapReduceKeysToLongTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6284     s.exec();
6285 dl 1.128 return false;
6286 dl 1.4 }
6287 dl 1.119 public final Long getRawResult() { return result; }
6288     }
6289    
6290 dl 1.128 @SuppressWarnings("serial") static final class MapReduceValuesToLongTask<K,V>
6291 dl 1.119 extends BulkTask<K,V,Long> {
6292     final ObjectToLong<? super V> transformer;
6293     final LongByLongToLong reducer;
6294     final long basis;
6295     long result;
6296 dl 1.128 MapReduceValuesToLongTask<K,V> rights, nextRight;
6297 dl 1.119 MapReduceValuesToLongTask
6298 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
6299 dl 1.128 MapReduceValuesToLongTask<K,V> nextRight,
6300 dl 1.119 ObjectToLong<? super V> transformer,
6301     long basis,
6302     LongByLongToLong reducer) {
6303 dl 1.130 super(m, p, b); this.nextRight = nextRight;
6304 dl 1.119 this.transformer = transformer;
6305     this.basis = basis; this.reducer = reducer;
6306     }
6307 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
6308 dl 1.119 final ObjectToLong<? super V> transformer =
6309     this.transformer;
6310     final LongByLongToLong reducer = this.reducer;
6311     if (transformer == null || reducer == null)
6312 dl 1.128 return abortOnNullFunction();
6313     try {
6314     final long id = this.basis;
6315     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6316     do {} while (!casPending(c = pending, c+1));
6317     (rights = new MapReduceValuesToLongTask<K,V>
6318 dl 1.130 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6319 dl 1.128 }
6320     long r = id;
6321     Object v;
6322     while ((v = advance()) != null)
6323     r = reducer.apply(r, transformer.apply((V)v));
6324     result = r;
6325     for (MapReduceValuesToLongTask<K,V> t = this, s;;) {
6326     int c; BulkTask<K,V,?> par;
6327     if ((c = t.pending) == 0) {
6328     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6329     t.result = reducer.apply(t.result, s.result);
6330     }
6331     if ((par = t.parent) == null ||
6332     !(par instanceof MapReduceValuesToLongTask)) {
6333     t.quietlyComplete();
6334     break;
6335     }
6336     t = (MapReduceValuesToLongTask<K,V>)par;
6337     }
6338     else if (t.casPending(c, c - 1))
6339     break;
6340 dl 1.119 }
6341 dl 1.128 } catch (Throwable ex) {
6342     return tryCompleteComputation(ex);
6343 dl 1.119 }
6344 dl 1.137 for (MapReduceValuesToLongTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6345     s.exec();
6346 dl 1.128 return false;
6347 jsr166 1.95 }
6348 dl 1.119 public final Long getRawResult() { return result; }
6349     }
6350    
6351 dl 1.128 @SuppressWarnings("serial") static final class MapReduceEntriesToLongTask<K,V>
6352 dl 1.119 extends BulkTask<K,V,Long> {
6353     final ObjectToLong<Map.Entry<K,V>> transformer;
6354     final LongByLongToLong reducer;
6355     final long basis;
6356     long result;
6357 dl 1.128 MapReduceEntriesToLongTask<K,V> rights, nextRight;
6358 dl 1.119 MapReduceEntriesToLongTask
6359 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
6360 dl 1.128 MapReduceEntriesToLongTask<K,V> nextRight,
6361 dl 1.119 ObjectToLong<Map.Entry<K,V>> transformer,
6362     long basis,
6363     LongByLongToLong reducer) {
6364 dl 1.130 super(m, p, b); this.nextRight = nextRight;
6365 dl 1.119 this.transformer = transformer;
6366     this.basis = basis; this.reducer = reducer;
6367     }
6368 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
6369 dl 1.119 final ObjectToLong<Map.Entry<K,V>> transformer =
6370     this.transformer;
6371     final LongByLongToLong reducer = this.reducer;
6372     if (transformer == null || reducer == null)
6373 dl 1.128 return abortOnNullFunction();
6374     try {
6375     final long id = this.basis;
6376     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6377     do {} while (!casPending(c = pending, c+1));
6378     (rights = new MapReduceEntriesToLongTask<K,V>
6379 dl 1.130 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6380 dl 1.128 }
6381     long r = id;
6382     Object v;
6383     while ((v = advance()) != null)
6384     r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
6385     result = r;
6386     for (MapReduceEntriesToLongTask<K,V> t = this, s;;) {
6387     int c; BulkTask<K,V,?> par;
6388     if ((c = t.pending) == 0) {
6389     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6390     t.result = reducer.apply(t.result, s.result);
6391     }
6392     if ((par = t.parent) == null ||
6393     !(par instanceof MapReduceEntriesToLongTask)) {
6394     t.quietlyComplete();
6395     break;
6396     }
6397     t = (MapReduceEntriesToLongTask<K,V>)par;
6398     }
6399     else if (t.casPending(c, c - 1))
6400     break;
6401 dl 1.119 }
6402 dl 1.128 } catch (Throwable ex) {
6403     return tryCompleteComputation(ex);
6404 dl 1.119 }
6405 dl 1.137 for (MapReduceEntriesToLongTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6406     s.exec();
6407 dl 1.128 return false;
6408 dl 1.4 }
6409 dl 1.119 public final Long getRawResult() { return result; }
6410 tim 1.1 }
6411    
6412 dl 1.128 @SuppressWarnings("serial") static final class MapReduceMappingsToLongTask<K,V>
6413 dl 1.119 extends BulkTask<K,V,Long> {
6414     final ObjectByObjectToLong<? super K, ? super V> transformer;
6415     final LongByLongToLong reducer;
6416     final long basis;
6417     long result;
6418 dl 1.128 MapReduceMappingsToLongTask<K,V> rights, nextRight;
6419 dl 1.119 MapReduceMappingsToLongTask
6420 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
6421 dl 1.128 MapReduceMappingsToLongTask<K,V> nextRight,
6422 dl 1.119 ObjectByObjectToLong<? super K, ? super V> transformer,
6423     long basis,
6424     LongByLongToLong reducer) {
6425 dl 1.130 super(m, p, b); this.nextRight = nextRight;
6426 dl 1.119 this.transformer = transformer;
6427     this.basis = basis; this.reducer = reducer;
6428     }
6429 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
6430 dl 1.119 final ObjectByObjectToLong<? super K, ? super V> transformer =
6431     this.transformer;
6432     final LongByLongToLong reducer = this.reducer;
6433     if (transformer == null || reducer == null)
6434 dl 1.128 return abortOnNullFunction();
6435     try {
6436     final long id = this.basis;
6437     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6438     do {} while (!casPending(c = pending, c+1));
6439     (rights = new MapReduceMappingsToLongTask<K,V>
6440 dl 1.130 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6441 dl 1.128 }
6442     long r = id;
6443     Object v;
6444     while ((v = advance()) != null)
6445     r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6446     result = r;
6447     for (MapReduceMappingsToLongTask<K,V> t = this, s;;) {
6448     int c; BulkTask<K,V,?> par;
6449     if ((c = t.pending) == 0) {
6450     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6451     t.result = reducer.apply(t.result, s.result);
6452     }
6453     if ((par = t.parent) == null ||
6454     !(par instanceof MapReduceMappingsToLongTask)) {
6455     t.quietlyComplete();
6456     break;
6457     }
6458     t = (MapReduceMappingsToLongTask<K,V>)par;
6459     }
6460     else if (t.casPending(c, c - 1))
6461     break;
6462 dl 1.119 }
6463 dl 1.128 } catch (Throwable ex) {
6464     return tryCompleteComputation(ex);
6465 dl 1.119 }
6466 dl 1.137 for (MapReduceMappingsToLongTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6467     s.exec();
6468 dl 1.128 return false;
6469 dl 1.4 }
6470 dl 1.119 public final Long getRawResult() { return result; }
6471 tim 1.1 }
6472    
6473 dl 1.128 @SuppressWarnings("serial") static final class MapReduceKeysToIntTask<K,V>
6474 dl 1.119 extends BulkTask<K,V,Integer> {
6475     final ObjectToInt<? super K> transformer;
6476     final IntByIntToInt reducer;
6477     final int basis;
6478     int result;
6479 dl 1.128 MapReduceKeysToIntTask<K,V> rights, nextRight;
6480 dl 1.119 MapReduceKeysToIntTask
6481 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
6482 dl 1.128 MapReduceKeysToIntTask<K,V> nextRight,
6483 dl 1.119 ObjectToInt<? super K> transformer,
6484     int basis,
6485     IntByIntToInt reducer) {
6486 dl 1.130 super(m, p, b); this.nextRight = nextRight;
6487 dl 1.119 this.transformer = transformer;
6488     this.basis = basis; this.reducer = reducer;
6489     }
6490 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
6491 dl 1.119 final ObjectToInt<? super K> transformer =
6492     this.transformer;
6493     final IntByIntToInt reducer = this.reducer;
6494     if (transformer == null || reducer == null)
6495 dl 1.128 return abortOnNullFunction();
6496     try {
6497     final int id = this.basis;
6498     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6499     do {} while (!casPending(c = pending, c+1));
6500     (rights = new MapReduceKeysToIntTask<K,V>
6501 dl 1.130 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6502 dl 1.128 }
6503     int r = id;
6504     while (advance() != null)
6505     r = reducer.apply(r, transformer.apply((K)nextKey));
6506     result = r;
6507     for (MapReduceKeysToIntTask<K,V> t = this, s;;) {
6508     int c; BulkTask<K,V,?> par;
6509     if ((c = t.pending) == 0) {
6510     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6511     t.result = reducer.apply(t.result, s.result);
6512     }
6513     if ((par = t.parent) == null ||
6514     !(par instanceof MapReduceKeysToIntTask)) {
6515     t.quietlyComplete();
6516     break;
6517     }
6518     t = (MapReduceKeysToIntTask<K,V>)par;
6519     }
6520     else if (t.casPending(c, c - 1))
6521     break;
6522 dl 1.119 }
6523 dl 1.128 } catch (Throwable ex) {
6524     return tryCompleteComputation(ex);
6525 dl 1.119 }
6526 dl 1.137 for (MapReduceKeysToIntTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6527     s.exec();
6528 dl 1.128 return false;
6529 dl 1.30 }
6530 dl 1.119 public final Integer getRawResult() { return result; }
6531 dl 1.30 }
6532    
6533 dl 1.128 @SuppressWarnings("serial") static final class MapReduceValuesToIntTask<K,V>
6534 dl 1.119 extends BulkTask<K,V,Integer> {
6535     final ObjectToInt<? super V> transformer;
6536     final IntByIntToInt reducer;
6537     final int basis;
6538     int result;
6539 dl 1.128 MapReduceValuesToIntTask<K,V> rights, nextRight;
6540 dl 1.119 MapReduceValuesToIntTask
6541 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
6542 dl 1.128 MapReduceValuesToIntTask<K,V> nextRight,
6543 dl 1.119 ObjectToInt<? super V> transformer,
6544     int basis,
6545     IntByIntToInt reducer) {
6546 dl 1.130 super(m, p, b); this.nextRight = nextRight;
6547 dl 1.119 this.transformer = transformer;
6548     this.basis = basis; this.reducer = reducer;
6549     }
6550 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
6551 dl 1.119 final ObjectToInt<? super V> transformer =
6552     this.transformer;
6553     final IntByIntToInt reducer = this.reducer;
6554     if (transformer == null || reducer == null)
6555 dl 1.128 return abortOnNullFunction();
6556     try {
6557     final int id = this.basis;
6558     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6559     do {} while (!casPending(c = pending, c+1));
6560     (rights = new MapReduceValuesToIntTask<K,V>
6561 dl 1.130 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6562 dl 1.128 }
6563     int r = id;
6564     Object v;
6565     while ((v = advance()) != null)
6566     r = reducer.apply(r, transformer.apply((V)v));
6567     result = r;
6568     for (MapReduceValuesToIntTask<K,V> t = this, s;;) {
6569     int c; BulkTask<K,V,?> par;
6570     if ((c = t.pending) == 0) {
6571     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6572     t.result = reducer.apply(t.result, s.result);
6573     }
6574     if ((par = t.parent) == null ||
6575     !(par instanceof MapReduceValuesToIntTask)) {
6576     t.quietlyComplete();
6577     break;
6578     }
6579     t = (MapReduceValuesToIntTask<K,V>)par;
6580     }
6581     else if (t.casPending(c, c - 1))
6582     break;
6583 dl 1.119 }
6584 dl 1.128 } catch (Throwable ex) {
6585     return tryCompleteComputation(ex);
6586 dl 1.2 }
6587 dl 1.137 for (MapReduceValuesToIntTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6588     s.exec();
6589 dl 1.128 return false;
6590 tim 1.1 }
6591 dl 1.119 public final Integer getRawResult() { return result; }
6592 tim 1.1 }
6593    
6594 dl 1.128 @SuppressWarnings("serial") static final class MapReduceEntriesToIntTask<K,V>
6595 dl 1.119 extends BulkTask<K,V,Integer> {
6596     final ObjectToInt<Map.Entry<K,V>> transformer;
6597     final IntByIntToInt reducer;
6598     final int basis;
6599     int result;
6600 dl 1.128 MapReduceEntriesToIntTask<K,V> rights, nextRight;
6601 dl 1.119 MapReduceEntriesToIntTask
6602 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
6603 dl 1.128 MapReduceEntriesToIntTask<K,V> nextRight,
6604 dl 1.119 ObjectToInt<Map.Entry<K,V>> transformer,
6605     int basis,
6606     IntByIntToInt reducer) {
6607 dl 1.130 super(m, p, b); this.nextRight = nextRight;
6608 dl 1.119 this.transformer = transformer;
6609     this.basis = basis; this.reducer = reducer;
6610     }
6611 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
6612 dl 1.119 final ObjectToInt<Map.Entry<K,V>> transformer =
6613     this.transformer;
6614     final IntByIntToInt reducer = this.reducer;
6615     if (transformer == null || reducer == null)
6616 dl 1.128 return abortOnNullFunction();
6617     try {
6618     final int id = this.basis;
6619     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6620     do {} while (!casPending(c = pending, c+1));
6621     (rights = new MapReduceEntriesToIntTask<K,V>
6622 dl 1.130 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6623 dl 1.128 }
6624     int r = id;
6625     Object v;
6626     while ((v = advance()) != null)
6627     r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
6628     result = r;
6629     for (MapReduceEntriesToIntTask<K,V> t = this, s;;) {
6630     int c; BulkTask<K,V,?> par;
6631     if ((c = t.pending) == 0) {
6632     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6633     t.result = reducer.apply(t.result, s.result);
6634     }
6635     if ((par = t.parent) == null ||
6636     !(par instanceof MapReduceEntriesToIntTask)) {
6637     t.quietlyComplete();
6638     break;
6639     }
6640     t = (MapReduceEntriesToIntTask<K,V>)par;
6641     }
6642     else if (t.casPending(c, c - 1))
6643     break;
6644 dl 1.119 }
6645 dl 1.128 } catch (Throwable ex) {
6646     return tryCompleteComputation(ex);
6647 dl 1.99 }
6648 dl 1.137 for (MapReduceEntriesToIntTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6649     s.exec();
6650 dl 1.128 return false;
6651 dl 1.4 }
6652 dl 1.119 public final Integer getRawResult() { return result; }
6653     }
6654 tim 1.1
6655 dl 1.128 @SuppressWarnings("serial") static final class MapReduceMappingsToIntTask<K,V>
6656 dl 1.119 extends BulkTask<K,V,Integer> {
6657     final ObjectByObjectToInt<? super K, ? super V> transformer;
6658     final IntByIntToInt reducer;
6659     final int basis;
6660     int result;
6661 dl 1.128 MapReduceMappingsToIntTask<K,V> rights, nextRight;
6662 dl 1.119 MapReduceMappingsToIntTask
6663 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
6664     MapReduceMappingsToIntTask<K,V> nextRight,
6665 dl 1.119 ObjectByObjectToInt<? super K, ? super V> transformer,
6666     int basis,
6667     IntByIntToInt reducer) {
6668 dl 1.130 super(m, p, b); this.nextRight = nextRight;
6669 dl 1.119 this.transformer = transformer;
6670     this.basis = basis; this.reducer = reducer;
6671     }
6672 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
6673 dl 1.119 final ObjectByObjectToInt<? super K, ? super V> transformer =
6674     this.transformer;
6675     final IntByIntToInt reducer = this.reducer;
6676     if (transformer == null || reducer == null)
6677 dl 1.128 return abortOnNullFunction();
6678     try {
6679     final int id = this.basis;
6680     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6681     do {} while (!casPending(c = pending, c+1));
6682     (rights = new MapReduceMappingsToIntTask<K,V>
6683 dl 1.130 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6684 dl 1.128 }
6685     int r = id;
6686     Object v;
6687     while ((v = advance()) != null)
6688     r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6689     result = r;
6690     for (MapReduceMappingsToIntTask<K,V> t = this, s;;) {
6691     int c; BulkTask<K,V,?> par;
6692     if ((c = t.pending) == 0) {
6693     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6694     t.result = reducer.apply(t.result, s.result);
6695     }
6696     if ((par = t.parent) == null ||
6697     !(par instanceof MapReduceMappingsToIntTask)) {
6698     t.quietlyComplete();
6699     break;
6700     }
6701     t = (MapReduceMappingsToIntTask<K,V>)par;
6702     }
6703     else if (t.casPending(c, c - 1))
6704     break;
6705 dl 1.119 }
6706 dl 1.128 } catch (Throwable ex) {
6707     return tryCompleteComputation(ex);
6708 dl 1.119 }
6709 dl 1.137 for (MapReduceMappingsToIntTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6710     s.exec();
6711 dl 1.128 return false;
6712 tim 1.1 }
6713 dl 1.119 public final Integer getRawResult() { return result; }
6714 tim 1.1 }
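    // Illustrative note on the completion protocol shared by the task classes above: pending is
    // incremented once per forked right-hand subtask, and every finishing task (the forks plus
    // the splitting task itself) walks up from itself through its parents. At each node, if the
    // pending count is nonzero the finisher CASes it down by one and stops; if it is zero, this
    // finisher is the last to arrive, so it folds the results of that node's rights chain into
    // the node's result and continues at the parent. When the parent is null or of a different
    // task type, it calls quietlyComplete() to finish the computation. Afterwards, any forked
    // subtasks that no worker stole are reclaimed with tryUnfork() and executed inline, so the
    // whole reduction completes without ever calling join().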
6715 dl 1.99
6716     // Unsafe mechanics
6717     private static final sun.misc.Unsafe UNSAFE;
6718 dl 1.119 private static final long counterOffset;
6719     private static final long sizeCtlOffset;
6720     private static final long ABASE;
6721     private static final int ASHIFT;
6722 dl 1.99
6723     static {
6724 dl 1.119 int ss;
6725 dl 1.99 try {
6726 dl 1.126 UNSAFE = sun.misc.Unsafe.getUnsafe();
6727 dl 1.119 Class<?> k = ConcurrentHashMap.class;
6728     counterOffset = UNSAFE.objectFieldOffset
6729     (k.getDeclaredField("counter"));
6730     sizeCtlOffset = UNSAFE.objectFieldOffset
6731     (k.getDeclaredField("sizeCtl"));
6732     Class<?> sc = Node[].class;
6733     ABASE = UNSAFE.arrayBaseOffset(sc);
6734 dl 1.99 ss = UNSAFE.arrayIndexScale(sc);
6735     } catch (Exception e) {
6736     throw new Error(e);
6737     }
6738 dl 1.119 if ((ss & (ss-1)) != 0)
6739 dl 1.99 throw new Error("data type scale not a power of two");
6740 dl 1.119 ASHIFT = 31 - Integer.numberOfLeadingZeros(ss);
6741 dl 1.99 }
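    // Illustrative note: ABASE and ASHIFT computed above give the base offset and log2(element
    // size) of a Node[] array, so the byte offset of slot i is ((long)i << ASHIFT) + ABASE.
    // A sketch of the kind of volatile slot access built on top of them (names illustrative):
    //
    //   static final Node tabAt(Node[] tab, int i) {             // volatile read of tab[i]
    //       return (Node)UNSAFE.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
    //   }
    //   static final boolean casTabAt(Node[] tab, int i, Node c, Node v) {
    //       return UNSAFE.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    //   }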
6742 dl 1.128 }