ViewVC Help
View File | Revision Log | Show Annotations | Download File | Root Listing
root/jsr166/jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java
Revision: 1.140
Committed: Tue Oct 30 16:46:09 2012 UTC (11 years, 7 months ago) by jsr166
Branch: MAIN
Changes since 1.139: +1 -1 lines
Log Message:
typo

File Contents

# User Rev Content
1 dl 1.2 /*
2     * Written by Doug Lea with assistance from members of JCP JSR-166
3 dl 1.36 * Expert Group and released to the public domain, as explained at
4 dl 1.100 * http://creativecommons.org/publicdomain/zero/1.0/
5 dl 1.2 */
6    
7 tim 1.1 package java.util.concurrent;
8 dl 1.119 import java.util.concurrent.atomic.LongAdder;
9     import java.util.concurrent.ForkJoinPool;
10     import java.util.concurrent.ForkJoinTask;
11    
12     import java.util.Comparator;
13     import java.util.Arrays;
14     import java.util.Map;
15     import java.util.Set;
16     import java.util.Collection;
17     import java.util.AbstractMap;
18     import java.util.AbstractSet;
19     import java.util.AbstractCollection;
20     import java.util.Hashtable;
21     import java.util.HashMap;
22     import java.util.Iterator;
23     import java.util.Enumeration;
24     import java.util.ConcurrentModificationException;
25     import java.util.NoSuchElementException;
26     import java.util.concurrent.ConcurrentMap;
27     import java.util.concurrent.ThreadLocalRandom;
28     import java.util.concurrent.locks.LockSupport;
29     import java.util.concurrent.locks.AbstractQueuedSynchronizer;
30     import java.util.concurrent.atomic.AtomicReference;
31    
32 tim 1.1 import java.io.Serializable;
33    
34     /**
35 dl 1.4 * A hash table supporting full concurrency of retrievals and
36 dl 1.119 * high expected concurrency for updates. This class obeys the
37 dl 1.22 * same functional specification as {@link java.util.Hashtable}, and
38 dl 1.19 * includes versions of methods corresponding to each method of
39 dl 1.119 * {@code Hashtable}. However, even though all operations are
40 dl 1.19 * thread-safe, retrieval operations do <em>not</em> entail locking,
41     * and there is <em>not</em> any support for locking the entire table
42     * in a way that prevents all access. This class is fully
43 dl 1.119 * interoperable with {@code Hashtable} in programs that rely on its
44 dl 1.4 * thread safety but not on its synchronization details.
45 tim 1.11 *
46 dl 1.119 * <p> Retrieval operations (including {@code get}) generally do not
47     * block, so may overlap with update operations (including {@code put}
48     * and {@code remove}). Retrievals reflect the results of the most
49     * recently <em>completed</em> update operations holding upon their
50 dl 1.126 * onset. (More formally, an update operation for a given key bears a
51     * <em>happens-before</em> relation with any (non-null) retrieval for
52     * that key reporting the updated value.) For aggregate operations
53     * such as {@code putAll} and {@code clear}, concurrent retrievals may
54     * reflect insertion or removal of only some entries. Similarly,
55     * Iterators and Enumerations return elements reflecting the state of
56     * the hash table at some point at or since the creation of the
57     * iterator/enumeration. They do <em>not</em> throw {@link
58     * ConcurrentModificationException}. However, iterators are designed
59     * to be used by only one thread at a time. Bear in mind that the
60     * results of aggregate status methods including {@code size}, {@code
61     * isEmpty}, and {@code containsValue} are typically useful only when
62     * a map is not undergoing concurrent updates in other threads.
63     * Otherwise the results of these methods reflect transient states
64     * that may be adequate for monitoring or estimation purposes, but not
65     * for program control.
66 tim 1.1 *
67 dl 1.119 * <p> The table is dynamically expanded when there are too many
68     * collisions (i.e., keys that have distinct hash codes but fall into
69     * the same slot modulo the table size), with the expected average
70     * effect of maintaining roughly two bins per mapping (corresponding
71     * to a 0.75 load factor threshold for resizing). There may be much
72     * variance around this average as mappings are added and removed, but
73     * overall, this maintains a commonly accepted time/space tradeoff for
74     * hash tables. However, resizing this or any other kind of hash
75     * table may be a relatively slow operation. When possible, it is a
76     * good idea to provide a size estimate as an optional {@code
77     * initialCapacity} constructor argument. An additional optional
78     * {@code loadFactor} constructor argument provides a further means of
79     * customizing initial table capacity by specifying the table density
80     * to be used in calculating the amount of space to allocate for the
81     * given number of elements. Also, for compatibility with previous
82     * versions of this class, constructors may optionally specify an
83     * expected {@code concurrencyLevel} as an additional hint for
84     * internal sizing. Note that using many keys with exactly the same
85     * {@code hashCode()} is a sure way to slow down performance of any
86     * hash table.
87 tim 1.1 *
88 dl 1.137 * <p> A {@link Set} projection of a ConcurrentHashMap may be created
89     * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
90     * (using {@link #keySet(Object)}) when only keys are of interest, and the
91     * mapped values are (perhaps transiently) not used or all take the
92     * same mapping value.
93     *
94     * <p> A ConcurrentHashMap can be used as scalable frequency map (a
95     * form of histogram or multiset) by using {@link LongAdder} values
96     * and initializing via {@link #computeIfAbsent}. For example, to add
97     * a count to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you
98     * can use {@code freqs.computeIfAbsent(k, k -> new
99     * LongAdder()).increment();}
100     *
101 dl 1.45 * <p>This class and its views and iterators implement all of the
102     * <em>optional</em> methods of the {@link Map} and {@link Iterator}
103     * interfaces.
104 dl 1.23 *
105 jsr166 1.68 * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class
106 dl 1.119 * does <em>not</em> allow {@code null} to be used as a key or value.
107 tim 1.1 *
108 dl 1.137 * <p>ConcurrentHashMaps support parallel operations using the {@link
109 jsr166 1.140 * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts
110 dl 1.137 * are available in class {@link ForkJoinTasks}). These operations are
111     * designed to be safely, and often sensibly, applied even with maps
112     * that are being concurrently updated by other threads; for example,
113     * when computing a snapshot summary of the values in a shared
114     * registry. There are three kinds of operation, each with four
115     * forms, accepting functions with Keys, Values, Entries, and (Key,
116     * Value) arguments and/or return values. Because the elements of a
117     * ConcurrentHashMap are not ordered in any particular way, and may be
118     * processed in different orders in different parallel executions, the
119     * correctness of supplied functions should not depend on any
120     * ordering, or on any other objects or values that may transiently
121     * change while computation is in progress; and except for forEach
122     * actions, should ideally be side-effect-free.
123     *
124     * <ul>
125     * <li> forEach: Perform a given action on each element.
126     * A variant form applies a given transformation on each element
127     * before performing the action.</li>
128     *
129     * <li> search: Return the first available non-null result of
130     * applying a given function on each element; skipping further
131     * search when a result is found.</li>
132     *
133     * <li> reduce: Accumulate each element. The supplied reduction
134     * function cannot rely on ordering (more formally, it should be
135     * both associative and commutative). There are five variants:
136     *
137     * <ul>
138     *
139     * <li> Plain reductions. (There is not a form of this method for
140     * (key, value) function arguments since there is no corresponding
141     * return type.)</li>
142     *
143     * <li> Mapped reductions that accumulate the results of a given
144     * function applied to each element.</li>
145     *
146     * <li> Reductions to scalar doubles, longs, and ints, using a
147     * given basis value.</li>
148     *
149     * </ul>
150     * </li>
151     * </ul>
152     *
153     * <p>The concurrency properties of bulk operations follow
154     * from those of ConcurrentHashMap: Any non-null result returned
155     * from {@code get(key)} and related access methods bears a
156     * happens-before relation with the associated insertion or
157     * update. The result of any bulk operation reflects the
158     * composition of these per-element relations (but is not
159     * necessarily atomic with respect to the map as a whole unless it
160     * is somehow known to be quiescent). Conversely, because keys
161     * and values in the map are never null, null serves as a reliable
162     * atomic indicator of the current lack of any result. To
163     * maintain this property, null serves as an implicit basis for
164     * all non-scalar reduction operations. For the double, long, and
165     * int versions, the basis should be one that, when combined with
166     * any other value, returns that other value (more formally, it
167     * should be the identity element for the reduction). Most common
168     * reductions have these properties; for example, computing a sum
169     * with basis 0 or a minimum with basis MAX_VALUE.
170     *
171     * <p>Search and transformation functions provided as arguments
172     * should similarly return null to indicate the lack of any result
173     * (in which case it is not used). In the case of mapped
174     * reductions, this also enables transformations to serve as
175     * filters, returning null (or, in the case of primitive
176     * specializations, the identity basis) if the element should not
177     * be combined. You can create compound transformations and
178     * filterings by composing them yourself under this "null means
179     * there is nothing there now" rule before using them in search or
180     * reduce operations.
181     *
182     * <p>Methods accepting and/or returning Entry arguments maintain
183     * key-value associations. They may be useful for example when
184     * finding the key for the greatest value. Note that "plain" Entry
185     * arguments can be supplied using {@code new
186     * AbstractMap.SimpleEntry(k,v)}.
187     *
188     * <p> Bulk operations may complete abruptly, throwing an
189     * exception encountered in the application of a supplied
190     * function. Bear in mind when handling such exceptions that other
191     * concurrently executing functions could also have thrown
192     * exceptions, or would have done so if the first exception had
193     * not occurred.
194     *
195     * <p>Parallel speedups for bulk operations compared to sequential
196     * processing are common but not guaranteed. Operations involving
197     * brief functions on small maps may execute more slowly than
198     * sequential loops if the underlying work to parallelize the
199     * computation is more expensive than the computation
200     * itself. Similarly, parallelization may not lead to much actual
201     * parallelism if all processors are busy performing unrelated tasks.
202     *
203     * <p> All arguments to all task methods must be non-null.
204     *
205     * <p><em>jsr166e note: During transition, this class
206     * uses nested functional interfaces with different names but the
207     * same forms as those expected for JDK8.</em>
208     *
209 dl 1.42 * <p>This class is a member of the
210 jsr166 1.88 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
211 dl 1.42 * Java Collections Framework</a>.
212     *
213 dl 1.8 * @since 1.5
214     * @author Doug Lea
215 dl 1.27 * @param <K> the type of keys maintained by this map
216 jsr166 1.64 * @param <V> the type of mapped values
217 dl 1.8 */
218 dl 1.119 public class ConcurrentHashMap<K, V>
219     implements ConcurrentMap<K, V>, Serializable {
220 dl 1.20 private static final long serialVersionUID = 7249069246763182397L;
221 tim 1.1
222 dl 1.119 /**
223     * A partitionable iterator. A Spliterator can be traversed
224     * directly, but can also be partitioned (before traversal) by
225     * creating another Spliterator that covers a non-overlapping
226     * portion of the elements, and so may be amenable to parallel
227     * execution.
228     *
229     * <p> This interface exports a subset of expected JDK8
230     * functionality.
231     *
232     * <p>Sample usage: Here is one (of the several) ways to compute
233     * the sum of the values held in a map using the ForkJoin
234     * framework. As illustrated here, Spliterators are well suited to
235     * designs in which a task repeatedly splits off half its work
236     * into forked subtasks until small enough to process directly,
237     * and then joins these subtasks. Variants of this style can also
238     * be used in completion-based designs.
239     *
240     * <pre>
241     * {@code ConcurrentHashMap<String, Long> m = ...
242     * // split as if have 8 * parallelism, for load balance
243     * int n = m.size();
244     * int p = aForkJoinPool.getParallelism() * 8;
245     * int split = (n < p)? n : p;
246     * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
247     * // ...
248     * static class SumValues extends RecursiveTask<Long> {
249     * final Spliterator<Long> s;
250     * final int split; // split while > 1
251     * final SumValues nextJoin; // records forked subtasks to join
252     * SumValues(Spliterator<Long> s, int depth, SumValues nextJoin) {
253     * this.s = s; this.depth = depth; this.nextJoin = nextJoin;
254     * }
255     * public Long compute() {
256     * long sum = 0;
257     * SumValues subtasks = null; // fork subtasks
258     * for (int s = split >>> 1; s > 0; s >>>= 1)
259     * (subtasks = new SumValues(s.split(), s, subtasks)).fork();
260     * while (s.hasNext()) // directly process remaining elements
261     * sum += s.next();
262     * for (SumValues t = subtasks; t != null; t = t.nextJoin)
263     * sum += t.join(); // collect subtask results
264     * return sum;
265     * }
266     * }
267     * }</pre>
268     */
269     public static interface Spliterator<T> extends Iterator<T> {
    // jsr166e interim API: unlike JDK8's final java.util.Spliterator, this one
    // extends Iterator, so remaining elements are consumed via hasNext()/next()
    // (the class javadoc above notes it "exports a subset of expected JDK8
    // functionality").
270     /**
271     * Returns a Spliterator covering approximately half of the
272     * elements, guaranteed not to overlap with those subsequently
273     * returned by this Spliterator. After invoking this method,
274     * the current Spliterator will <em>not</em> produce any of
275     * the elements of the returned Spliterator, but the two
276     * Spliterators together will produce all of the elements that
277     * would have been produced by this Spliterator had this
278     * method not been called. The exact number of elements
279     * produced by the returned Spliterator is not guaranteed, and
280     * may be zero (i.e., with {@code hasNext()} reporting {@code
281     * false}) if this Spliterator cannot be further split.
282     *
283     * @return a Spliterator covering approximately half of the
284     * elements
285     * @throws IllegalStateException if this Spliterator has
286     * already commenced traversing elements
287     */
288     Spliterator<T> split();
289     }
290    
291 dl 1.137 /**
292     * A view of a ConcurrentHashMap as a {@link Set} of keys, in
293     * which additions may optionally be enabled by mapping to a
294     * common value. This class cannot be directly instantiated. See
295     * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()},
296     * {@link #newKeySet(int)}.
297     *
298     * <p>The view's {@code iterator} is a "weakly consistent" iterator
299     * that will never throw {@link ConcurrentModificationException},
300     * and guarantees to traverse elements as they existed upon
301     * construction of the iterator, and may (but is not guaranteed to)
302     * reflect any modifications subsequent to construction.
303     */
304     public static class KeySetView<K,V> extends CHMView<K,V> implements Set<K>, java.io.Serializable {
305     private static final long serialVersionUID = 7249069246763182397L;
    // Default value used for additions via add/addAll; when null, the view is
    // read-only with respect to insertion (add/addAll throw
    // UnsupportedOperationException).
306     private final V value;
307     KeySetView(ConcurrentHashMap<K, V> map, V value) { // non-public
308     super(map);
309     this.value = value;
310     }
311    
312     /**
313     * Returns the map backing this view.
314     *
315     * @return the map backing this view
316     */
317     public ConcurrentHashMap<K,V> getMap() { return map; }
318    
319     /**
320     * Returns the default mapped value for additions,
321     * or {@code null} if additions are not supported.
322     *
323     * @return the default mapped value for additions, or {@code null}
324     * if not supported.
325     */
326     public V getMappedValue() { return value; }
327    
328     // implement Set API
329    
    // Membership, removal, and iteration delegate directly to the backing map;
    // the iterator inherits the map's weakly-consistent guarantees (see class
    // javadoc above).
330     public boolean contains(Object o) { return map.containsKey(o); }
331     public boolean remove(Object o) { return map.remove(o) != null; }
332     public Iterator<K> iterator() { return new KeyIterator<K,V>(map); }
    // Adds e mapped to the fixed default value; returns true only if e was not
    // already present (putIfAbsent returned no prior value). Null elements are
    // rejected, matching the map's null-hostile contract.
333     public boolean add(K e) {
334     V v;
335     if ((v = value) == null)
336     throw new UnsupportedOperationException();
337     if (e == null)
338     throw new NullPointerException();
339     return map.internalPutIfAbsent(e, v) == null;
340     }
    // Bulk form of add: returns true if at least one element was newly
    // inserted. Note: elements inserted before a null element is encountered
    // remain in the map when NullPointerException is thrown.
341     public boolean addAll(Collection<? extends K> c) {
342     boolean added = false;
343     V v;
344     if ((v = value) == null)
345     throw new UnsupportedOperationException();
346     for (K e : c) {
347     if (e == null)
348     throw new NullPointerException();
349     if (map.internalPutIfAbsent(e, v) == null)
350     added = true;
351     }
352     return added;
353     }
    // Set equality per the Set contract: mutual containment.
    // NOTE(review): hashCode() is not overridden here — presumably inherited
    // from CHMView (not visible in this chunk); confirm it satisfies the
    // equals/hashCode contract for Set.
354     public boolean equals(Object o) {
355     Set<?> c;
356     return ((o instanceof Set) &&
357     ((c = (Set<?>)o) == this ||
358     (containsAll(c) && c.containsAll(this))));
359     }
360     }
361    
362 tim 1.1 /*
363 dl 1.119 * Overview:
364     *
365     * The primary design goal of this hash table is to maintain
366     * concurrent readability (typically method get(), but also
367     * iterators and related methods) while minimizing update
368     * contention. Secondary goals are to keep space consumption about
369     * the same or better than java.util.HashMap, and to support high
370     * initial insertion rates on an empty table by many threads.
371     *
372     * Each key-value mapping is held in a Node. Because Node fields
373     * can contain special values, they are defined using plain Object
374     * types. Similarly in turn, all internal methods that use them
375     * work off Object types. And similarly, so do the internal
376     * methods of auxiliary iterator and view classes. All public
377     * generic typed methods relay in/out of these internal methods,
378     * supplying null-checks and casts as needed. This also allows
379     * many of the public methods to be factored into a smaller number
380     * of internal methods (although sadly not so for the five
381     * variants of put-related operations). The validation-based
382     * approach explained below leads to a lot of code sprawl because
383     * retry-control precludes factoring into smaller methods.
384     *
385     * The table is lazily initialized to a power-of-two size upon the
386     * first insertion. Each bin in the table normally contains a
387     * list of Nodes (most often, the list has only zero or one Node).
388     * Table accesses require volatile/atomic reads, writes, and
389     * CASes. Because there is no other way to arrange this without
390     * adding further indirections, we use intrinsics
391     * (sun.misc.Unsafe) operations. The lists of nodes within bins
392     * are always accurately traversable under volatile reads, so long
393     * as lookups check hash code and non-nullness of value before
394     * checking key equality.
395     *
396     * We use the top two bits of Node hash fields for control
397     * purposes -- they are available anyway because of addressing
398     * constraints. As explained further below, these top bits are
399     * used as follows:
400     * 00 - Normal
401     * 01 - Locked
402     * 11 - Locked and may have a thread waiting for lock
403     * 10 - Node is a forwarding node
404     *
405     * The lower 30 bits of each Node's hash field contain a
406     * transformation of the key's hash code, except for forwarding
407     * nodes, for which the lower bits are zero (and so always have
408     * hash field == MOVED).
409     *
410     * Insertion (via put or its variants) of the first node in an
411     * empty bin is performed by just CASing it to the bin. This is
412     * by far the most common case for put operations under most
413     * key/hash distributions. Other update operations (insert,
414     * delete, and replace) require locks. We do not want to waste
415     * the space required to associate a distinct lock object with
416     * each bin, so instead use the first node of a bin list itself as
417     * a lock. Blocking support for these locks relies on the builtin
418     * "synchronized" monitors. However, we also need a tryLock
419     * construction, so we overlay these by using bits of the Node
420     * hash field for lock control (see above), and so normally use
421     * builtin monitors only for blocking and signalling using
422     * wait/notifyAll constructions. See Node.tryAwaitLock.
423     *
424     * Using the first node of a list as a lock does not by itself
425     * suffice though: When a node is locked, any update must first
426     * validate that it is still the first node after locking it, and
427     * retry if not. Because new nodes are always appended to lists,
428     * once a node is first in a bin, it remains first until deleted
429     * or the bin becomes invalidated (upon resizing). However,
430     * operations that only conditionally update may inspect nodes
431     * until the point of update. This is a converse of sorts to the
432     * lazy locking technique described by Herlihy & Shavit.
433     *
434     * The main disadvantage of per-bin locks is that other update
435     * operations on other nodes in a bin list protected by the same
436     * lock can stall, for example when user equals() or mapping
437     * functions take a long time. However, statistically, under
438     * random hash codes, this is not a common problem. Ideally, the
439     * frequency of nodes in bins follows a Poisson distribution
440     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
441     * parameter of about 0.5 on average, given the resizing threshold
442     * of 0.75, although with a large variance because of resizing
443     * granularity. Ignoring variance, the expected occurrences of
444     * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
445     * first values are:
446     *
447     * 0: 0.60653066
448     * 1: 0.30326533
449     * 2: 0.07581633
450     * 3: 0.01263606
451     * 4: 0.00157952
452     * 5: 0.00015795
453     * 6: 0.00001316
454     * 7: 0.00000094
455     * 8: 0.00000006
456     * more: less than 1 in ten million
457     *
458     * Lock contention probability for two threads accessing distinct
459     * elements is roughly 1 / (8 * #elements) under random hashes.
460     *
461     * Actual hash code distributions encountered in practice
462     * sometimes deviate significantly from uniform randomness. This
463     * includes the case when N > (1<<30), so some keys MUST collide.
464     * Similarly for dumb or hostile usages in which multiple keys are
465     * designed to have identical hash codes. Also, although we guard
466     * against the worst effects of this (see method spread), sets of
467     * hashes may differ only in bits that do not impact their bin
468     * index for a given power-of-two mask. So we use a secondary
469     * strategy that applies when the number of nodes in a bin exceeds
470     * a threshold, and at least one of the keys implements
471     * Comparable. These TreeBins use a balanced tree to hold nodes
472     * (a specialized form of red-black trees), bounding search time
473     * to O(log N). Each search step in a TreeBin is around twice as
474     * slow as in a regular list, but given that N cannot exceed
475     * (1<<64) (before running out of addresses) this bounds search
476     * steps, lock hold times, etc, to reasonable constants (roughly
477     * 100 nodes inspected per operation worst case) so long as keys
478     * are Comparable (which is very common -- String, Long, etc).
479     * TreeBin nodes (TreeNodes) also maintain the same "next"
480     * traversal pointers as regular nodes, so can be traversed in
481     * iterators in the same way.
482     *
483     * The table is resized when occupancy exceeds a percentage
484     * threshold (nominally, 0.75, but see below). Only a single
485     * thread performs the resize (using field "sizeCtl", to arrange
486     * exclusion), but the table otherwise remains usable for reads
487     * and updates. Resizing proceeds by transferring bins, one by
488     * one, from the table to the next table. Because we are using
489     * power-of-two expansion, the elements from each bin must either
490     * stay at same index, or move with a power of two offset. We
491     * eliminate unnecessary node creation by catching cases where old
492     * nodes can be reused because their next fields won't change. On
493     * average, only about one-sixth of them need cloning when a table
494     * doubles. The nodes they replace will be garbage collectable as
495     * soon as they are no longer referenced by any reader thread that
496     * may be in the midst of concurrently traversing table. Upon
497     * transfer, the old table bin contains only a special forwarding
498     * node (with hash field "MOVED") that contains the next table as
499     * its key. On encountering a forwarding node, access and update
500     * operations restart, using the new table.
501     *
502     * Each bin transfer requires its bin lock. However, unlike other
503     * cases, a transfer can skip a bin if it fails to acquire its
504     * lock, and revisit it later (unless it is a TreeBin). Method
505     * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that
506     * have been skipped because of failure to acquire a lock, and
507     * blocks only if none are available (i.e., only very rarely).
508     * The transfer operation must also ensure that all accessible
509     * bins in both the old and new table are usable by any traversal.
510     * When there are no lock acquisition failures, this is arranged
511     * simply by proceeding from the last bin (table.length - 1) up
512     * towards the first. Upon seeing a forwarding node, traversals
513     * (see class Iter) arrange to move to the new table
514     * without revisiting nodes. However, when any node is skipped
515     * during a transfer, all earlier table bins may have become
516     * visible, so are initialized with a reverse-forwarding node back
517     * to the old table until the new ones are established. (This
518     * sometimes requires transiently locking a forwarding node, which
519     * is possible under the above encoding.) These more expensive
520     * mechanics trigger only when necessary.
521     *
522     * The traversal scheme also applies to partial traversals of
523     * ranges of bins (via an alternate Traverser constructor)
524     * to support partitioned aggregate operations. Also, read-only
525     * operations give up if ever forwarded to a null table, which
526     * provides support for shutdown-style clearing, which is also not
527     * currently implemented.
528     *
529     * Lazy table initialization minimizes footprint until first use,
530     * and also avoids resizings when the first operation is from a
531     * putAll, constructor with map argument, or deserialization.
532     * These cases attempt to override the initial capacity settings,
533     * but harmlessly fail to take effect in cases of races.
534     *
535     * The element count is maintained using a LongAdder, which avoids
536     * contention on updates but can encounter cache thrashing if read
537     * too frequently during concurrent access. To avoid reading so
538     * often, resizing is attempted either when a bin lock is
539     * contended, or upon adding to a bin already holding two or more
540     * nodes (checked before adding in the xIfAbsent methods, after
541     * adding in others). Under uniform hash distributions, the
542     * probability of this occurring at threshold is around 13%,
543     * meaning that only about 1 in 8 puts check threshold (and after
544     * resizing, many fewer do so). But this approximation has high
545     * variance for small table sizes, so we check on any collision
546     * for sizes <= 64. The bulk putAll operation further reduces
547     * contention by only committing count updates upon these size
548     * checks.
549     *
550     * Maintaining API and serialization compatibility with previous
551     * versions of this class introduces several oddities. Mainly: We
552     * leave untouched but unused constructor arguments referring to
553     * concurrencyLevel. We accept a loadFactor constructor argument,
554     * but apply it only to initial table capacity (which is the only
555     * time that we can guarantee to honor it.) We also declare an
556     * unused "Segment" class that is instantiated in minimal form
557     * only when serializing.
558 dl 1.4 */
559 tim 1.1
    /* ---------------- Constants -------------- */

    /**
     * The largest possible table capacity.  This value must be
     * exactly 1<<30 to stay within Java array allocation and indexing
     * bounds for power of two table sizes, and is further required
     * because the top two bits of 32bit hash fields are used for
     * control purposes.
     */
    private static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The default initial table capacity.  Must be a power of 2
     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
     */
    private static final int DEFAULT_CAPACITY = 16;

    /**
     * The largest possible (non-power of two) array size.
     * Needed by toArray and related methods.
     */
    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

    /**
     * The default concurrency level for this table. Unused but
     * defined for compatibility with previous versions of this class.
     */
    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;

    /**
     * The load factor for this table. Overrides of this value in
     * constructors affect only the initial table capacity.  The
     * actual floating point value isn't normally used -- it is
     * simpler to use expressions such as {@code n - (n >>> 2)} for
     * the associated resizing threshold.
     */
    private static final float LOAD_FACTOR = 0.75f;

    /**
     * The buffer size for skipped bins during transfers. The
     * value is arbitrary but should be large enough to avoid
     * most locking stalls during resizes.
     */
    private static final int TRANSFER_BUFFER_SIZE = 32;

    /**
     * The bin count threshold for using a tree rather than list for a
     * bin.  The value reflects the approximate break-even point for
     * using tree-based operations.
     */
    private static final int TREE_THRESHOLD = 8;

    /*
     * Encodings for special uses of Node hash fields. See above for
     * explanation. Note the arithmetic relations: WAITING is exactly
     * MOVED | LOCKED, and HASH_BITS is the complement of WAITING, so
     * (hash & HASH_BITS) strips both control bits.
     */
    static final int MOVED     = 0x80000000; // hash field for forwarding nodes
    static final int LOCKED    = 0x40000000; // set/tested only as a bit
    static final int WAITING   = 0xc0000000; // both bits set/tested together
    static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash
620 dl 1.46
    /* ---------------- Fields -------------- */

    /**
     * The array of bins. Lazily initialized upon first insertion.
     * Size is always a power of two. Accessed directly by iterators.
     */
    transient volatile Node[] table;

    /**
     * The counter maintaining number of elements.
     */
    private transient final LongAdder counter;

    /**
     * Table initialization and resizing control.  When negative, the
     * table is being initialized or resized. Otherwise, when table is
     * null, holds the initial table size to use upon creation, or 0
     * for default. After initialization, holds the next element count
     * value upon which to resize the table.
     */
    private transient volatile int sizeCtl;

    // View objects (key set, values, entries). All transient, so they
    // are not written during serialization.
    private transient KeySetView<K,V> keySet;
    private transient Values<K,V> values;
    private transient EntrySet<K,V> entrySet;

    /** For serialization compatibility. Null unless serialized; see below */
    private Segment<K,V>[] segments;
650    
    /* ---------------- Table element access -------------- */

    /*
     * Volatile access methods are used for table elements as well as
     * elements of in-progress next table while resizing. Uses are
     * null checked by callers, and implicitly bounds-checked, relying
     * on the invariants that tab arrays have non-zero size, and all
     * indices are masked with (tab.length - 1) which is never
     * negative and always less than length. Note that, to be correct
     * wrt arbitrary concurrency errors by users, bounds checks must
     * operate on local variables, which accounts for some odd-looking
     * inline assignments below.
     */

    /** Volatile-read of the head node of bin i of tab. */
    static final Node tabAt(Node[] tab, int i) { // used by Iter
        return (Node)UNSAFE.getObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE);
    }

    /** CAS of the head node of bin i of tab from c to v. */
    private static final boolean casTabAt(Node[] tab, int i, Node c, Node v) {
        return UNSAFE.compareAndSwapObject(tab, ((long)i<<ASHIFT)+ABASE, c, v);
    }

    /** Volatile-write of the head node of bin i of tab. */
    private static final void setTabAt(Node[] tab, int i, Node v) {
        UNSAFE.putObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE, v);
    }
676    
    /* ---------------- Nodes -------------- */

    /**
     * Key-value entry. Note that this is never exported out as a
     * user-visible Map.Entry (see MapEntry below). Nodes with a hash
     * field of MOVED are special, and do not contain user keys or
     * values.  Otherwise, keys are never null, and null val fields
     * indicate that a node is in the process of being deleted or
     * created. For purposes of read-only access, a key may be read
     * before a val, but can only be used after checking val to be
     * non-null.
     */
    static class Node {
        volatile int hash;        // spread hash plus LOCKED/WAITING control bits
        final Object key;
        volatile Object val;
        volatile Node next;

        Node(int hash, Object key, Object val, Node next) {
            this.hash = hash;
            this.key = key;
            this.val = val;
            this.next = next;
        }

        /** CompareAndSet the hash field */
        final boolean casHash(int cmp, int val) {
            return UNSAFE.compareAndSwapInt(this, hashOffset, cmp, val);
        }

        /** The number of spins before blocking for a lock */
        static final int MAX_SPINS =
            Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;

        /**
         * Spins a while if LOCKED bit set and this node is the first
         * of its bin, and then sets WAITING bits on hash field and
         * blocks (once) if they are still set.  It is OK for this
         * method to return even if lock is not available upon exit,
         * which enables these simple single-wait mechanics.
         *
         * The corresponding signalling operation is performed within
         * callers: Upon detecting that WAITING has been set when
         * unlocking lock (via a failed CAS from non-waiting LOCKED
         * state), unlockers acquire the sync lock and perform a
         * notifyAll.
         *
         * The initial sanity check on tab and bounds is not currently
         * necessary in the only usages of this method, but enables
         * use in other future contexts.
         *
         * @param tab the table holding this node at bin head
         * @param i   the bin index this node is expected to head
         */
        final void tryAwaitLock(Node[] tab, int i) {
            if (tab != null && i >= 0 && i < tab.length) { // sanity check
                int r = ThreadLocalRandom.current().nextInt(); // randomize spins
                int spins = MAX_SPINS, h;
                // Loop only while this node is still the bin head and locked;
                // either condition changing means the caller should retry.
                while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) {
                    if (spins >= 0) {
                        r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
                        if (r >= 0 && --spins == 0)
                            Thread.yield(); // yield before block
                    }
                    else if (casHash(h, h | WAITING)) {
                        synchronized (this) {
                            // Re-check under the monitor: only wait if WAITING
                            // is still set and we still head the bin.
                            if (tabAt(tab, i) == this &&
                                (hash & WAITING) == WAITING) {
                                try {
                                    wait();
                                } catch (InterruptedException ie) {
                                    // Preserve interrupt status for caller.
                                    Thread.currentThread().interrupt();
                                }
                            }
                            else
                                notifyAll(); // possibly won race vs signaller
                        }
                        break;
                    }
                }
            }
        }

        // Unsafe mechanics for casHash
        private static final sun.misc.Unsafe UNSAFE;
        private static final long hashOffset;

        static {
            try {
                UNSAFE = sun.misc.Unsafe.getUnsafe();
                Class<?> k = Node.class;
                hashOffset = UNSAFE.objectFieldOffset
                    (k.getDeclaredField("hash"));
            } catch (Exception e) {
                throw new Error(e);
            }
        }
    }
772    
    /* ---------------- TreeBins -------------- */

    /**
     * Nodes for use in TreeBins. Extends Node so that a TreeBin can
     * also be traversed linearly via the inherited next pointers.
     */
    static final class TreeNode extends Node {
        TreeNode parent;  // red-black tree links
        TreeNode left;
        TreeNode right;
        TreeNode prev;    // needed to unlink next upon deletion
        boolean red;

        TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) {
            super(hash, key, val, next);
            this.parent = parent;
        }
    }
790 tim 1.1
    /**
     * A specialized form of red-black tree for use in bins
     * whose size exceeds a threshold.
     *
     * TreeBins use a special form of comparison for search and
     * related operations (which is the main reason we cannot use
     * existing collections such as TreeMaps). TreeBins contain
     * Comparable elements, but may contain others, as well as
     * elements that are Comparable but not necessarily Comparable<T>
     * for the same T, so we cannot invoke compareTo among them. To
     * handle this, the tree is ordered primarily by hash value, then
     * by getClass().getName() order, and then by Comparator order
     * among elements of the same class.  On lookup at a node, if
     * elements are not comparable or compare as 0, both left and
     * right children may need to be searched in the case of tied hash
     * values. (This corresponds to the full list search that would be
     * necessary if all elements were non-Comparable and had tied
     * hashes.)  The red-black balancing code is updated from
     * pre-jdk-collections
     * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
     * based in turn on Cormen, Leiserson, and Rivest "Introduction to
     * Algorithms" (CLR).
     *
     * TreeBins also maintain a locking discipline different from
     * that of regular bins. Because they are forwarded via special
     * MOVED nodes at bin heads (which can never change once
     * established), we cannot use those nodes as locks. Instead,
     * TreeBin extends AbstractQueuedSynchronizer to support a simple
     * form of read-write lock. For update operations and table
     * validation, the exclusive form of lock behaves in the same way
     * as bin-head locks. However, lookups use shared read-lock
     * mechanics to allow multiple readers in the absence of writers.
     * Additionally, these lookups do not ever block: While the lock
     * is not available, they proceed along the slow traversal path
     * (via next-pointers) until the lock becomes available or the
     * list is exhausted, whichever comes first. (These cases are not
     * fast, but maximize aggregate expected throughput.)  The AQS
     * mechanics for doing this are straightforward.  The lock state
     * is held as AQS getState().  Read counts are negative; the
     * write count (1) is positive.  There are no signalling
     * preferences among readers and writers. Since we don't need to
     * export full Lock API, we just override the minimal AQS methods
     * and use them directly.
     */
    static final class TreeBin extends AbstractQueuedSynchronizer {
        private static final long serialVersionUID = 2249069246763182397L;
        transient TreeNode root;  // root of tree
        transient TreeNode first; // head of next-pointer list

        /* AQS overrides */
        public final boolean isHeldExclusively() { return getState() > 0; }
        public final boolean tryAcquire(int ignore) {
            // Writer: state goes 0 -> 1; fails if any reader (negative)
            // or writer (positive) holds the lock.
            if (compareAndSetState(0, 1)) {
                setExclusiveOwnerThread(Thread.currentThread());
                return true;
            }
            return false;
        }
        public final boolean tryRelease(int ignore) {
            setExclusiveOwnerThread(null);
            setState(0);
            return true;
        }
        public final int tryAcquireShared(int ignore) {
            // Reader: decrement state (readers are counted negatively);
            // fail (-1) while a writer (state > 0) holds the lock.
            for (int c;;) {
                if ((c = getState()) > 0)
                    return -1;
                if (compareAndSetState(c, c - 1))
                    return 1;
            }
        }
        public final boolean tryReleaseShared(int ignore) {
            // Increment toward zero; propagate (return true) only when
            // the last reader leaves (previous state was -1).
            int c;
            do {} while (!compareAndSetState(c = getState(), c + 1));
            return c == -1;
        }

        /** From CLR */
        private void rotateLeft(TreeNode p) {
            if (p != null) {
                TreeNode r = p.right, pp, rl;
                if ((rl = p.right = r.left) != null)
                    rl.parent = p;
                if ((pp = r.parent = p.parent) == null)
                    root = r;
                else if (pp.left == p)
                    pp.left = r;
                else
                    pp.right = r;
                r.left = p;
                p.parent = r;
            }
        }

        /** From CLR */
        private void rotateRight(TreeNode p) {
            if (p != null) {
                TreeNode l = p.left, pp, lr;
                if ((lr = p.left = l.right) != null)
                    lr.parent = p;
                if ((pp = l.parent = p.parent) == null)
                    root = l;
                else if (pp.right == p)
                    pp.right = l;
                else
                    pp.left = l;
                l.right = p;
                p.parent = l;
            }
        }

        /**
         * Returns the TreeNode (or null if not found) for the given key
         * starting at given root. Ordering is primarily by hash, then
         * class name, then Comparable order; on ties both subtrees may
         * be searched (see class comment).
         */
        @SuppressWarnings("unchecked") final TreeNode getTreeNode
            (int h, Object k, TreeNode p) {
            Class<?> c = k.getClass();
            while (p != null) {
                int dir, ph;  Object pk; Class<?> pc;
                if ((ph = p.hash) == h) {
                    if ((pk = p.key) == k || k.equals(pk))
                        return p;
                    if (c != (pc = pk.getClass()) ||
                        !(k instanceof Comparable) ||
                        (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
                        // Untrusted ordering: break ties by class name, and
                        // probe the other subtree for equal-hash candidates.
                        dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
                        TreeNode r = null, s = null, pl, pr;
                        if (dir >= 0) {
                            if ((pl = p.left) != null && h <= pl.hash)
                                s = pl;
                        }
                        else if ((pr = p.right) != null && h >= pr.hash)
                            s = pr;
                        if (s != null && (r = getTreeNode(h, k, s)) != null)
                            return r;
                    }
                }
                else
                    dir = (h < ph) ? -1 : 1;
                p = (dir > 0) ? p.right : p.left;
            }
            return null;
        }

        /**
         * Wrapper for getTreeNode used by CHM.get. Tries to obtain
         * read-lock to call getTreeNode, but during failure to get
         * lock, searches along next links.
         */
        final Object getValue(int h, Object k) {
            Node r = null;
            int c = getState(); // Must read lock state first
            for (Node e = first; e != null; e = e.next) {
                if (c <= 0 && compareAndSetState(c, c - 1)) {
                    // Acquired read lock: do the tree search.
                    try {
                        r = getTreeNode(h, k, root);
                    } finally {
                        releaseShared(0);
                    }
                    break;
                }
                else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) {
                    // Writer active: fall back to linear list traversal.
                    r = e;
                    break;
                }
                else
                    c = getState();
            }
            return r == null ? null : r.val;
        }

        /**
         * Finds or adds a node. Caller must hold the exclusive lock.
         * @return null if added
         */
        @SuppressWarnings("unchecked") final TreeNode putTreeNode
            (int h, Object k, Object v) {
            Class<?> c = k.getClass();
            TreeNode pp = root, p = null;
            int dir = 0;
            while (pp != null) { // find existing node or leaf to insert at
                int ph;  Object pk; Class<?> pc;
                p = pp;
                if ((ph = p.hash) == h) {
                    if ((pk = p.key) == k || k.equals(pk))
                        return p;
                    if (c != (pc = pk.getClass()) ||
                        !(k instanceof Comparable) ||
                        (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
                        // Same tie-breaking as getTreeNode, including the
                        // equal-hash subtree probe for an existing match.
                        dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
                        TreeNode r = null, s = null, pl, pr;
                        if (dir >= 0) {
                            if ((pl = p.left) != null && h <= pl.hash)
                                s = pl;
                        }
                        else if ((pr = p.right) != null && h >= pr.hash)
                            s = pr;
                        if (s != null && (r = getTreeNode(h, k, s)) != null)
                            return r;
                    }
                }
                else
                    dir = (h < ph) ? -1 : 1;
                pp = (dir > 0) ? p.right : p.left;
            }

            // Not found: link at list head and at the tree leaf p.
            TreeNode f = first;
            TreeNode x = first = new TreeNode(h, k, v, f, p);
            if (p == null)
                root = x;
            else { // attach and rebalance; adapted from CLR
                TreeNode xp, xpp;
                if (f != null)
                    f.prev = x;
                if (dir <= 0)
                    p.left = x;
                else
                    p.right = x;
                x.red = true;
                while (x != null && (xp = x.parent) != null && xp.red &&
                       (xpp = xp.parent) != null) {
                    TreeNode xppl = xpp.left;
                    if (xp == xppl) {
                        TreeNode y = xpp.right;
                        if (y != null && y.red) {
                            y.red = false;
                            xp.red = false;
                            xpp.red = true;
                            x = xpp;
                        }
                        else {
                            if (x == xp.right) {
                                rotateLeft(x = xp);
                                xpp = (xp = x.parent) == null ? null : xp.parent;
                            }
                            if (xp != null) {
                                xp.red = false;
                                if (xpp != null) {
                                    xpp.red = true;
                                    rotateRight(xpp);
                                }
                            }
                        }
                    }
                    else { // mirror image of the above
                        TreeNode y = xppl;
                        if (y != null && y.red) {
                            y.red = false;
                            xp.red = false;
                            xpp.red = true;
                            x = xpp;
                        }
                        else {
                            if (x == xp.left) {
                                rotateRight(x = xp);
                                xpp = (xp = x.parent) == null ? null : xp.parent;
                            }
                            if (xp != null) {
                                xp.red = false;
                                if (xpp != null) {
                                    xpp.red = true;
                                    rotateLeft(xpp);
                                }
                            }
                        }
                    }
                }
                TreeNode r = root;
                if (r != null && r.red)
                    r.red = false; // root is always black
            }
            return null;
        }

        /**
         * Removes the given node, that must be present before this
         * call.  This is messier than typical red-black deletion code
         * because we cannot swap the contents of an interior node
         * with a leaf successor that is pinned by "next" pointers
         * that are accessible independently of lock. So instead we
         * swap the tree linkages.
         */
        final void deleteTreeNode(TreeNode p) {
            TreeNode next = (TreeNode)p.next; // unlink traversal pointers
            TreeNode pred = p.prev;
            if (pred == null)
                first = next;
            else
                pred.next = next;
            if (next != null)
                next.prev = pred;
            TreeNode replacement;
            TreeNode pl = p.left;
            TreeNode pr = p.right;
            if (pl != null && pr != null) {
                TreeNode s = pr, sl;
                while ((sl = s.left) != null) // find successor
                    s = sl;
                boolean c = s.red; s.red = p.red; p.red = c; // swap colors
                TreeNode sr = s.right;
                TreeNode pp = p.parent;
                if (s == pr) { // p was s's direct parent
                    p.parent = s;
                    s.right = p;
                }
                else {
                    TreeNode sp = s.parent;
                    if ((p.parent = sp) != null) {
                        if (s == sp.left)
                            sp.left = p;
                        else
                            sp.right = p;
                    }
                    if ((s.right = pr) != null)
                        pr.parent = s;
                }
                p.left = null;
                if ((p.right = sr) != null)
                    sr.parent = p;
                if ((s.left = pl) != null)
                    pl.parent = s;
                if ((s.parent = pp) == null)
                    root = s;
                else if (p == pp.left)
                    pp.left = s;
                else
                    pp.right = s;
                replacement = sr;
            }
            else
                replacement = (pl != null) ? pl : pr;
            TreeNode pp = p.parent;
            if (replacement == null) {
                if (pp == null) {
                    root = null;
                    return;
                }
                replacement = p; // p itself stands in; detached at the end
            }
            else {
                replacement.parent = pp;
                if (pp == null)
                    root = replacement;
                else if (p == pp.left)
                    pp.left = replacement;
                else
                    pp.right = replacement;
                p.left = p.right = p.parent = null;
            }
            if (!p.red) { // rebalance, from CLR
                TreeNode x = replacement;
                while (x != null) {
                    TreeNode xp, xpl;
                    if (x.red || (xp = x.parent) == null) {
                        x.red = false;
                        break;
                    }
                    if (x == (xpl = xp.left)) {
                        TreeNode sib = xp.right;
                        if (sib != null && sib.red) {
                            sib.red = false;
                            xp.red = true;
                            rotateLeft(xp);
                            sib = (xp = x.parent) == null ? null : xp.right;
                        }
                        if (sib == null)
                            x = xp;
                        else {
                            TreeNode sl = sib.left, sr = sib.right;
                            if ((sr == null || !sr.red) &&
                                (sl == null || !sl.red)) {
                                sib.red = true;
                                x = xp;
                            }
                            else {
                                if (sr == null || !sr.red) {
                                    if (sl != null)
                                        sl.red = false;
                                    sib.red = true;
                                    rotateRight(sib);
                                    sib = (xp = x.parent) == null ? null : xp.right;
                                }
                                if (sib != null) {
                                    sib.red = (xp == null) ? false : xp.red;
                                    if ((sr = sib.right) != null)
                                        sr.red = false;
                                }
                                if (xp != null) {
                                    xp.red = false;
                                    rotateLeft(xp);
                                }
                                x = root;
                            }
                        }
                    }
                    else { // symmetric
                        TreeNode sib = xpl;
                        if (sib != null && sib.red) {
                            sib.red = false;
                            xp.red = true;
                            rotateRight(xp);
                            sib = (xp = x.parent) == null ? null : xp.left;
                        }
                        if (sib == null)
                            x = xp;
                        else {
                            TreeNode sl = sib.left, sr = sib.right;
                            if ((sl == null || !sl.red) &&
                                (sr == null || !sr.red)) {
                                sib.red = true;
                                x = xp;
                            }
                            else {
                                if (sl == null || !sl.red) {
                                    if (sr != null)
                                        sr.red = false;
                                    sib.red = true;
                                    rotateLeft(sib);
                                    sib = (xp = x.parent) == null ? null : xp.left;
                                }
                                if (sib != null) {
                                    sib.red = (xp == null) ? false : xp.red;
                                    if ((sl = sib.left) != null)
                                        sl.red = false;
                                }
                                if (xp != null) {
                                    xp.red = false;
                                    rotateRight(xp);
                                }
                                x = root;
                            }
                        }
                    }
                }
            }
            if (p == replacement && (pp = p.parent) != null) {
                if (p == pp.left) // detach pointers
                    pp.left = null;
                else if (p == pp.right)
                    pp.right = null;
                p.parent = null;
            }
        }
    }
1235    
1236 dl 1.119 /* ---------------- Collision reduction methods -------------- */
1237 tim 1.1
1238 dl 1.99 /**
1239 dl 1.119 * Spreads higher bits to lower, and also forces top 2 bits to 0.
1240     * Because the table uses power-of-two masking, sets of hashes
1241     * that vary only in bits above the current mask will always
1242     * collide. (Among known examples are sets of Float keys holding
1243     * consecutive whole numbers in small tables.) To counter this,
1244     * we apply a transform that spreads the impact of higher bits
1245     * downward. There is a tradeoff between speed, utility, and
1246     * quality of bit-spreading. Because many common sets of hashes
1247     * are already reasonably distributed across bits (so don't benefit
1248     * from spreading), and because we use trees to handle large sets
1249     * of collisions in bins, we don't need excessively high quality.
1250     */
1251     private static final int spread(int h) {
1252     h ^= (h >>> 18) ^ (h >>> 12);
1253     return (h ^ (h >>> 10)) & HASH_BITS;
1254 dl 1.99 }
1255    
    /**
     * Replaces a list bin with a tree bin. Call only when locked.
     * Fails to replace if the given key is non-comparable or table
     * is, or needs, resizing.
     *
     * @param tab   the current table
     * @param index the bin to convert
     * @param key   the key triggering conversion (treeify only if Comparable)
     */
    private final void replaceWithTreeBin(Node[] tab, int index, Object key) {
        // Skip treeification when a resize is pending (count has reached
        // sizeCtl) unless the table is already at maximum capacity.
        if ((key instanceof Comparable) &&
            (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) {
            TreeBin t = new TreeBin();
            for (Node e = tabAt(tab, index); e != null; e = e.next)
                t.putTreeNode(e.hash & HASH_BITS, e.key, e.val);
            // Publish the tree behind a MOVED node holding the TreeBin as key.
            setTabAt(tab, index, new Node(MOVED, t, null, null));
        }
    }
1270 tim 1.11
1271 dl 1.119 /* ---------------- Internal access and update methods -------------- */
1272    
1273     /** Implementation for get and containsKey */
1274     private final Object internalGet(Object k) {
1275     int h = spread(k.hashCode());
1276     retry: for (Node[] tab = table; tab != null;) {
1277     Node e, p; Object ek, ev; int eh; // locals to read fields once
1278     for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) {
1279     if ((eh = e.hash) == MOVED) {
1280     if ((ek = e.key) instanceof TreeBin) // search TreeBin
1281     return ((TreeBin)ek).getValue(h, k);
1282     else { // restart with new table
1283     tab = (Node[])ek;
1284     continue retry;
1285     }
1286     }
1287     else if ((eh & HASH_BITS) == h && (ev = e.val) != null &&
1288     ((ek = e.key) == k || k.equals(ek)))
1289     return ev;
1290     }
1291     break;
1292     }
1293     return null;
1294 tim 1.1 }
1295    
    /**
     * Implementation for the four public remove/replace methods:
     * Replaces node value with v, conditional upon match of cv if
     * non-null.  If resulting value is null, delete.
     *
     * @param k  the key
     * @param v  the new value, or null to delete the mapping
     * @param cv if non-null, replace/delete only when current value matches
     * @return the previous value acted upon, or null
     */
    private final Object internalReplace(Object k, Object v, Object cv) {
        int h = spread(k.hashCode());
        Object oldVal = null;
        for (Node[] tab = table;;) {
            Node f; int i, fh; Object fk;
            if (tab == null ||
                (f = tabAt(tab, i = (tab.length - 1) & h)) == null)
                break;                          // no table or empty bin: absent
            else if ((fh = f.hash) == MOVED) {
                if ((fk = f.key) instanceof TreeBin) {
                    // Tree bin: update under the TreeBin's exclusive lock.
                    TreeBin t = (TreeBin)fk;
                    boolean validated = false;
                    boolean deleted = false;
                    t.acquire(0);
                    try {
                        if (tabAt(tab, i) == f) {   // recheck head under lock
                            validated = true;
                            TreeNode p = t.getTreeNode(h, k, t.root);
                            if (p != null) {
                                Object pv = p.val;
                                if (cv == null || cv == pv || cv.equals(pv)) {
                                    oldVal = pv;
                                    if ((p.val = v) == null) {
                                        deleted = true;
                                        t.deleteTreeNode(p);
                                    }
                                }
                            }
                        }
                    } finally {
                        t.release(0);
                    }
                    if (validated) {
                        if (deleted)
                            counter.add(-1L);
                        break;
                    }
                }
                else
                    tab = (Node[])fk;           // forwarded: retry in new table
            }
            else if ((fh & HASH_BITS) != h && f.next == null) // precheck
                break;                          // rules out possible existence
            else if ((fh & LOCKED) != 0) {
                checkForResize();               // try resizing if can't get lock
                f.tryAwaitLock(tab, i);
            }
            else if (f.casHash(fh, fh | LOCKED)) {
                // Acquired the bin-head lock bit: scan and update the list.
                boolean validated = false;
                boolean deleted = false;
                try {
                    if (tabAt(tab, i) == f) {       // recheck head under lock
                        validated = true;
                        for (Node e = f, pred = null;;) {
                            Object ek, ev;
                            if ((e.hash & HASH_BITS) == h &&
                                ((ev = e.val) != null) &&
                                ((ek = e.key) == k || k.equals(ek))) {
                                if (cv == null || cv == ev || cv.equals(ev)) {
                                    oldVal = ev;
                                    if ((e.val = v) == null) {
                                        deleted = true;
                                        Node en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                }
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null)
                                break;
                        }
                    }
                } finally {
                    // Release lock bit; a failed CAS means WAITING was set,
                    // so restore hash and wake blocked threads.
                    if (!f.casHash(fh | LOCKED, fh)) {
                        f.hash = fh;
                        synchronized (f) { f.notifyAll(); };
                    }
                }
                if (validated) {
                    if (deleted)
                        counter.add(-1L);
                    break;
                }
            }
        }
        return oldVal;
    }
1392    
1393 dl 1.119 /*
1394 dl 1.126 * Internal versions of the six insertion methods, each a
1395 dl 1.119 * little more complicated than the last. All have
1396     * the same basic structure as the first (internalPut):
1397     * 1. If table uninitialized, create
1398     * 2. If bin empty, try to CAS new node
1399     * 3. If bin stale, use new table
1400     * 4. if bin converted to TreeBin, validate and relay to TreeBin methods
1401     * 5. Lock and validate; if valid, scan and add or update
1402 tim 1.1 *
1403 dl 1.119 * The others interweave other checks and/or alternative actions:
1404     * * Plain put checks for and performs resize after insertion.
1405     * * putIfAbsent prescans for mapping without lock (and fails to add
1406     * if present), which also makes pre-emptive resize checks worthwhile.
1407     * * computeIfAbsent extends form used in putIfAbsent with additional
     * mechanics to deal with calls, potential exceptions and null
     * returns from the function call.
1410     * * compute uses the same function-call mechanics, but without
1411     * the prescans
1412 dl 1.126 * * merge acts as putIfAbsent in the absent case, but invokes the
1413     * update function if present
1414 dl 1.119 * * putAll attempts to pre-allocate enough table space
1415     * and more lazily performs count updates and checks.
1416 jsr166 1.76 *
1417 dl 1.119 * Someday when details settle down a bit more, it might be worth
1418     * some factoring to reduce sprawl.
1419 tim 1.1 */
1420    
    /**
     * Implementation for put.  Follows the common insertion structure:
     * 1. if table uninitialized, create; 2. if bin empty, CAS a new
     * node; 3. if bin forwarded (MOVED), follow to the new table or
     * relay to the TreeBin; 4. otherwise lock the bin head, validate,
     * and scan/append.  Returns the previous value, or null if newly
     * added (in which case the count is bumped and a resize check may
     * run).
     */
    private final Object internalPut(Object k, Object v) {
        int h = spread(k.hashCode());
        int count = 0;   // bin length estimate; > 1 triggers resize check
        for (Node[] tab = table;;) {
            int i; Node f; int fh; Object fk;
            if (tab == null)
                tab = initTable();
            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
                if (casTabAt(tab, i, null, new Node(h, k, v, null)))
                    break;                   // no lock when adding to empty bin
            }
            else if ((fh = f.hash) == MOVED) {
                if ((fk = f.key) instanceof TreeBin) {
                    // Tree bin: insert under the TreeBin's exclusive lock.
                    TreeBin t = (TreeBin)fk;
                    Object oldVal = null;
                    t.acquire(0);
                    try {
                        if (tabAt(tab, i) == f) {   // recheck head under lock
                            count = 2;
                            TreeNode p = t.putTreeNode(h, k, v);
                            if (p != null) {        // key present: overwrite
                                oldVal = p.val;
                                p.val = v;
                            }
                        }
                    } finally {
                        t.release(0);
                    }
                    if (count != 0) {
                        if (oldVal != null)
                            return oldVal;
                        break;
                    }
                }
                else
                    tab = (Node[])fk;       // forwarded: retry in new table
            }
            else if ((fh & LOCKED) != 0) {
                checkForResize();
                f.tryAwaitLock(tab, i);
            }
            else if (f.casHash(fh, fh | LOCKED)) {
                Object oldVal = null;
                try {                        // needed in case equals() throws
                    if (tabAt(tab, i) == f) {
                        count = 1;
                        for (Node e = f;; ++count) {
                            Object ek, ev;
                            if ((e.hash & HASH_BITS) == h &&
                                (ev = e.val) != null &&
                                ((ek = e.key) == k || k.equals(ek))) {
                                oldVal = ev;
                                e.val = v;
                                break;
                            }
                            Node last = e;
                            if ((e = e.next) == null) {
                                last.next = new Node(h, k, v, null);
                                if (count >= TREE_THRESHOLD)
                                    replaceWithTreeBin(tab, i, k);
                                break;
                            }
                        }
                    }
                } finally {                  // unlock and signal if needed
                    if (!f.casHash(fh | LOCKED, fh)) {
                        f.hash = fh;
                        synchronized (f) { f.notifyAll(); };
                    }
                }
                if (count != 0) {
                    if (oldVal != null)
                        return oldVal;
                    if (tab.length <= 64)
                        count = 2;          // force resize check in small tables
                    break;
                }
            }
        }
        counter.add(1L);
        if (count > 1)
            checkForResize();
        return null;
    }
1506    
    /**
     * Implementation for putIfAbsent: binds k to v only if k has no
     * current (non-null) value.
     *
     * Retries the outer loop until one bin-state case commits:
     *  - null table: lazily initialize, then retry
     *  - empty bin: CAS a fresh node into the slot (no lock needed)
     *  - MOVED hash: the bin is either a TreeBin (operate under its
     *    lock) or a forwarding node whose key is the post-resize table
     *  - first node already holds k with a live value: return it
     *  - otherwise: scan the chain without locking (any hit returns
     *    early), then lock the bin by CASing the LOCKED bit into the
     *    head node's hash and re-scan/append under that lock
     *
     * count records the traversed chain length: reaching TREE_THRESHOLD
     * converts the bin to a TreeBin, and count > 1 on exit triggers a
     * resize check after the element count is bumped.
     *
     * @return the pre-existing value for k, or null if this call
     *         installed v
     */
    private final Object internalPutIfAbsent(Object k, Object v) {
        int h = spread(k.hashCode());
        int count = 0;
        for (Node[] tab = table;;) {
            int i; Node f; int fh; Object fk, fv;
            if (tab == null)
                tab = initTable();
            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
                if (casTabAt(tab, i, null, new Node(h, k, v, null)))
                    break;                  // lost CAS race: retry bin
            }
            else if ((fh = f.hash) == MOVED) {
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin t = (TreeBin)fk;
                    Object oldVal = null;
                    t.acquire(0);
                    try {
                        if (tabAt(tab, i) == f) { // recheck under tree lock
                            count = 2;
                            TreeNode p = t.putTreeNode(h, k, v);
                            if (p != null)  // existing node: keep old value
                                oldVal = p.val;
                        }
                    } finally {
                        t.release(0);
                    }
                    if (count != 0) {
                        if (oldVal != null)
                            return oldVal;
                        break;
                    }
                }
                else                        // forwarding node: follow to new table
                    tab = (Node[])fk;
            }
            else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
                     ((fk = f.key) == k || k.equals(fk)))
                return fv;                  // first node matches: lock-free hit
            else {
                Node g = f.next;
                if (g != null) { // at least 2 nodes -- search and maybe resize
                    for (Node e = g;;) {
                        Object ek, ev;
                        if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
                            ((ek = e.key) == k || k.equals(ek)))
                            return ev;
                        if ((e = e.next) == null) {
                            checkForResize();
                            break;
                        }
                    }
                }
                if (((fh = f.hash) & LOCKED) != 0) {
                    checkForResize();       // help while waiting for bin lock
                    f.tryAwaitLock(tab, i);
                }
                else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
                    Object oldVal = null;
                    try {                   // needed in case equals() throws
                        if (tabAt(tab, i) == f) {
                            count = 1;
                            for (Node e = f;; ++count) {
                                Object ek, ev;
                                if ((e.hash & HASH_BITS) == h &&
                                    (ev = e.val) != null &&
                                    ((ek = e.key) == k || k.equals(ek))) {
                                    oldVal = ev;
                                    break;
                                }
                                Node last = e;
                                if ((e = e.next) == null) {
                                    last.next = new Node(h, k, v, null);
                                    if (count >= TREE_THRESHOLD)
                                        replaceWithTreeBin(tab, i, k);
                                    break;
                                }
                            }
                        }
                    } finally {             // unlock and signal if needed
                        if (!f.casHash(fh | LOCKED, fh)) {
                            f.hash = fh;
                            synchronized (f) { f.notifyAll(); };
                        }
                    }
                    if (count != 0) {
                        if (oldVal != null)
                            return oldVal;
                        if (tab.length <= 64)
                            count = 2;      // force resize check for small tables
                        break;
                    }
                }
            }
        }
        counter.add(1L);                    // a new mapping was added
        if (count > 1)
            checkForResize();
        return null;
    }
1607    
    /**
     * Implementation for computeIfAbsent: if k is unmapped, computes
     * mf.apply(k) and, when non-null, installs it as k's value.
     *
     * For an empty bin, a placeholder node is CASed in with the LOCKED
     * bit already set so the mapping function runs while other threads
     * for this bin are excluded; if the function returns null the
     * placeholder is removed again. Other bin states follow the usual
     * cases: TreeBin under its lock, forwarding node chased to the new
     * table, unlocked chain scan, then a locked re-scan/append.
     *
     * @return the current (existing or computed) value for k, or null
     *         if the mapping function returned null
     */
    private final Object internalComputeIfAbsent(K k,
                                                 Fun<? super K, ?> mf) {
        int h = spread(k.hashCode());
        Object val = null;
        int count = 0;
        for (Node[] tab = table;;) {
            Node f; int i, fh; Object fk, fv;
            if (tab == null)
                tab = initTable();
            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
                // reserve the slot with a pre-locked placeholder node
                Node node = new Node(fh = h | LOCKED, k, null, null);
                if (casTabAt(tab, i, null, node)) {
                    count = 1;
                    try {
                        if ((val = mf.apply(k)) != null)
                            node.val = val;
                    } finally {
                        if (val == null)    // no value: undo reservation
                            setTabAt(tab, i, null);
                        if (!node.casHash(fh, h)) { // unlock and signal if needed
                            node.hash = h;
                            synchronized (node) { node.notifyAll(); };
                        }
                    }
                }
                if (count != 0)
                    break;
            }
            else if ((fh = f.hash) == MOVED) {
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin t = (TreeBin)fk;
                    boolean added = false;
                    t.acquire(0);
                    try {
                        if (tabAt(tab, i) == f) { // recheck under tree lock
                            count = 1;
                            TreeNode p = t.getTreeNode(h, k, t.root);
                            if (p != null)  // already present
                                val = p.val;
                            else if ((val = mf.apply(k)) != null) {
                                added = true;
                                count = 2;
                                t.putTreeNode(h, k, val);
                            }
                        }
                    } finally {
                        t.release(0);
                    }
                    if (count != 0) {
                        if (!added)
                            return val;
                        break;
                    }
                }
                else                        // forwarding node: follow to new table
                    tab = (Node[])fk;
            }
            else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
                     ((fk = f.key) == k || k.equals(fk)))
                return fv;                  // first node matches: lock-free hit
            else {
                Node g = f.next;
                if (g != null) {            // unlocked scan of rest of chain
                    for (Node e = g;;) {
                        Object ek, ev;
                        if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
                            ((ek = e.key) == k || k.equals(ek)))
                            return ev;
                        if ((e = e.next) == null) {
                            checkForResize();
                            break;
                        }
                    }
                }
                if (((fh = f.hash) & LOCKED) != 0) {
                    checkForResize();
                    f.tryAwaitLock(tab, i);
                }
                else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
                    boolean added = false;
                    try {
                        if (tabAt(tab, i) == f) {
                            count = 1;
                            for (Node e = f;; ++count) {
                                Object ek, ev;
                                if ((e.hash & HASH_BITS) == h &&
                                    (ev = e.val) != null &&
                                    ((ek = e.key) == k || k.equals(ek))) {
                                    val = ev;
                                    break;
                                }
                                Node last = e;
                                if ((e = e.next) == null) {
                                    if ((val = mf.apply(k)) != null) {
                                        added = true;
                                        last.next = new Node(h, k, val, null);
                                        if (count >= TREE_THRESHOLD)
                                            replaceWithTreeBin(tab, i, k);
                                    }
                                    break;
                                }
                            }
                        }
                    } finally {             // unlock and signal if needed
                        if (!f.casHash(fh | LOCKED, fh)) {
                            f.hash = fh;
                            synchronized (f) { f.notifyAll(); };
                        }
                    }
                    if (count != 0) {
                        if (!added)
                            return val;
                        if (tab.length <= 64)
                            count = 2;      // force resize check for small tables
                        break;
                    }
                }
            }
        }
        if (val != null) {                  // only additions change the count
            counter.add(1L);
            if (count > 1)
                checkForResize();
        }
        return val;
    }
1735    
    /**
     * Implementation for compute and computeIfPresent: applies
     * mf.apply(k, currentValue-or-null) and installs the result,
     * removing the mapping when the function returns null.
     *
     * delta records the net size change (-1 removal, 0 replacement,
     * +1 insertion) so the element counter is adjusted exactly once on
     * exit. When onlyIfPresent is true, absent keys are left untouched
     * (computeIfPresent semantics).
     *
     * @return the new value for k, or null if the mapping was removed
     *         or never created
     */
    @SuppressWarnings("unchecked") private final Object internalCompute
        (K k, boolean onlyIfPresent, BiFun<? super K, ? super V, ? extends V> mf) {
        int h = spread(k.hashCode());
        Object val = null;
        int delta = 0;
        int count = 0;
        for (Node[] tab = table;;) {
            Node f; int i, fh; Object fk;
            if (tab == null)
                tab = initTable();
            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
                if (onlyIfPresent)          // computeIfPresent: nothing to do
                    break;
                // reserve the slot with a pre-locked placeholder node
                Node node = new Node(fh = h | LOCKED, k, null, null);
                if (casTabAt(tab, i, null, node)) {
                    try {
                        count = 1;
                        if ((val = mf.apply(k, null)) != null) {
                            node.val = val;
                            delta = 1;
                        }
                    } finally {
                        if (delta == 0)     // no value: undo reservation
                            setTabAt(tab, i, null);
                        if (!node.casHash(fh, h)) { // unlock and signal if needed
                            node.hash = h;
                            synchronized (node) { node.notifyAll(); };
                        }
                    }
                }
                if (count != 0)
                    break;
            }
            else if ((fh = f.hash) == MOVED) {
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin t = (TreeBin)fk;
                    t.acquire(0);
                    try {
                        if (tabAt(tab, i) == f) { // recheck under tree lock
                            count = 1;
                            TreeNode p = t.getTreeNode(h, k, t.root);
                            Object pv = (p == null) ? null : p.val;
                            if ((val = mf.apply(k, (V)pv)) != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    count = 2;
                                    delta = 1;
                                    t.putTreeNode(h, k, val);
                                }
                            }
                            else if (p != null) { // function voted to remove
                                delta = -1;
                                t.deleteTreeNode(p);
                            }
                        }
                    } finally {
                        t.release(0);
                    }
                    if (count != 0)
                        break;
                }
                else                        // forwarding node: follow to new table
                    tab = (Node[])fk;
            }
            else if ((fh & LOCKED) != 0) {
                checkForResize();
                f.tryAwaitLock(tab, i);
            }
            else if (f.casHash(fh, fh | LOCKED)) {
                try {
                    if (tabAt(tab, i) == f) {
                        count = 1;
                        for (Node e = f, pred = null;; ++count) {
                            Object ek, ev;
                            if ((e.hash & HASH_BITS) == h &&
                                (ev = e.val) != null &&
                                ((ek = e.key) == k || k.equals(ek))) {
                                val = mf.apply(k, (V)ev);
                                if (val != null)
                                    e.val = val;
                                else {      // unlink the node being removed
                                    delta = -1;
                                    Node en = e.next;
                                    if (pred != null)
                                        pred.next = en;
                                    else
                                        setTabAt(tab, i, en);
                                }
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null) {
                                if (!onlyIfPresent && (val = mf.apply(k, null)) != null) {
                                    pred.next = new Node(h, k, val, null);
                                    delta = 1;
                                    if (count >= TREE_THRESHOLD)
                                        replaceWithTreeBin(tab, i, k);
                                }
                                break;
                            }
                        }
                    }
                } finally {                 // unlock and signal if needed
                    if (!f.casHash(fh | LOCKED, fh)) {
                        f.hash = fh;
                        synchronized (f) { f.notifyAll(); };
                    }
                }
                if (count != 0) {
                    if (tab.length <= 64)
                        count = 2;          // force resize check for small tables
                    break;
                }
            }
        }
        if (delta != 0) {
            counter.add((long)delta);
            if (count > 1)
                checkForResize();
        }
        return val;
    }
1860    
1861 dl 1.126 /** Implementation for merge */
1862 dl 1.128 @SuppressWarnings("unchecked") private final Object internalMerge
1863     (K k, V v, BiFun<? super V, ? super V, ? extends V> mf) {
1864 dl 1.119 int h = spread(k.hashCode());
1865     Object val = null;
1866     int delta = 0;
1867     int count = 0;
1868     for (Node[] tab = table;;) {
1869     int i; Node f; int fh; Object fk, fv;
1870     if (tab == null)
1871     tab = initTable();
1872     else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1873     if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1874     delta = 1;
1875     val = v;
1876     break;
1877     }
1878     }
1879     else if ((fh = f.hash) == MOVED) {
1880     if ((fk = f.key) instanceof TreeBin) {
1881     TreeBin t = (TreeBin)fk;
1882     t.acquire(0);
1883     try {
1884     if (tabAt(tab, i) == f) {
1885     count = 1;
1886     TreeNode p = t.getTreeNode(h, k, t.root);
1887     val = (p == null) ? v : mf.apply((V)p.val, v);
1888     if (val != null) {
1889     if (p != null)
1890     p.val = val;
1891     else {
1892     count = 2;
1893     delta = 1;
1894     t.putTreeNode(h, k, val);
1895     }
1896     }
1897     else if (p != null) {
1898     delta = -1;
1899     t.deleteTreeNode(p);
1900     }
1901     }
1902     } finally {
1903     t.release(0);
1904     }
1905     if (count != 0)
1906     break;
1907     }
1908     else
1909     tab = (Node[])fk;
1910     }
1911     else if ((fh & LOCKED) != 0) {
1912     checkForResize();
1913     f.tryAwaitLock(tab, i);
1914     }
1915     else if (f.casHash(fh, fh | LOCKED)) {
1916     try {
1917     if (tabAt(tab, i) == f) {
1918     count = 1;
1919     for (Node e = f, pred = null;; ++count) {
1920     Object ek, ev;
1921     if ((e.hash & HASH_BITS) == h &&
1922     (ev = e.val) != null &&
1923     ((ek = e.key) == k || k.equals(ek))) {
1924     val = mf.apply(v, (V)ev);
1925     if (val != null)
1926     e.val = val;
1927     else {
1928     delta = -1;
1929     Node en = e.next;
1930     if (pred != null)
1931     pred.next = en;
1932     else
1933     setTabAt(tab, i, en);
1934     }
1935     break;
1936     }
1937     pred = e;
1938     if ((e = e.next) == null) {
1939     val = v;
1940     pred.next = new Node(h, k, val, null);
1941     delta = 1;
1942     if (count >= TREE_THRESHOLD)
1943     replaceWithTreeBin(tab, i, k);
1944     break;
1945     }
1946     }
1947     }
1948     } finally {
1949     if (!f.casHash(fh | LOCKED, fh)) {
1950     f.hash = fh;
1951     synchronized (f) { f.notifyAll(); };
1952     }
1953     }
1954     if (count != 0) {
1955     if (tab.length <= 64)
1956     count = 2;
1957     break;
1958     }
1959 dl 1.105 }
1960 dl 1.99 }
1961 dl 1.119 if (delta != 0) {
1962     counter.add((long)delta);
1963     if (count > 1)
1964     checkForResize();
1965     }
1966     return val;
1967     }
1968    
    /**
     * Implementation for putAll: inserts or overwrites every mapping
     * of m, after presizing the table for m.size() elements.
     *
     * Additions are batched in delta and flushed to the counter before
     * blocking on a bin lock, after a long locked chain, and on exit
     * (including the exceptional path, via the outer finally). A null
     * entry, key, or value stops the loop and is reported as a
     * NullPointerException only after the counts have been committed.
     */
    private final void internalPutAll(Map<?, ?> m) {
        tryPresize(m.size());
        long delta = 0L;                    // number of uncommitted additions
        boolean npe = false;                // to throw exception on exit for nulls
        try {                               // to clean up counts on other exceptions
            for (Map.Entry<?, ?> entry : m.entrySet()) {
                Object k, v;
                if (entry == null || (k = entry.getKey()) == null ||
                    (v = entry.getValue()) == null) {
                    npe = true;
                    break;
                }
                int h = spread(k.hashCode());
                for (Node[] tab = table;;) {
                    int i; Node f; int fh; Object fk;
                    if (tab == null)
                        tab = initTable();
                    else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
                        if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
                            ++delta;
                            break;
                        }
                    }
                    else if ((fh = f.hash) == MOVED) {
                        if ((fk = f.key) instanceof TreeBin) {
                            TreeBin t = (TreeBin)fk;
                            boolean validated = false;
                            t.acquire(0);
                            try {
                                if (tabAt(tab, i) == f) { // recheck under tree lock
                                    validated = true;
                                    TreeNode p = t.getTreeNode(h, k, t.root);
                                    if (p != null)
                                        p.val = v;      // overwrite existing
                                    else {
                                        t.putTreeNode(h, k, v);
                                        ++delta;
                                    }
                                }
                            } finally {
                                t.release(0);
                            }
                            if (validated)
                                break;
                        }
                        else                // forwarding node: follow to new table
                            tab = (Node[])fk;
                    }
                    else if ((fh & LOCKED) != 0) {
                        counter.add(delta); // flush count before blocking
                        delta = 0L;
                        checkForResize();
                        f.tryAwaitLock(tab, i);
                    }
                    else if (f.casHash(fh, fh | LOCKED)) {
                        int count = 0;
                        try {
                            if (tabAt(tab, i) == f) {
                                count = 1;
                                for (Node e = f;; ++count) {
                                    Object ek, ev;
                                    if ((e.hash & HASH_BITS) == h &&
                                        (ev = e.val) != null &&
                                        ((ek = e.key) == k || k.equals(ek))) {
                                        e.val = v;      // overwrite existing
                                        break;
                                    }
                                    Node last = e;
                                    if ((e = e.next) == null) {
                                        ++delta;
                                        last.next = new Node(h, k, v, null);
                                        if (count >= TREE_THRESHOLD)
                                            replaceWithTreeBin(tab, i, k);
                                        break;
                                    }
                                }
                            }
                        } finally {         // unlock and signal if needed
                            if (!f.casHash(fh | LOCKED, fh)) {
                                f.hash = fh;
                                synchronized (f) { f.notifyAll(); };
                            }
                        }
                        if (count != 0) {
                            if (count > 1) { // long chain: flush and maybe resize
                                counter.add(delta);
                                delta = 0L;
                                checkForResize();
                            }
                            break;
                        }
                    }
                }
            }
        } finally {
            if (delta != 0)
                counter.add(delta);
        }
        if (npe)
            throw new NullPointerException();
    }
2071 dl 1.19
2072 dl 1.119 /* ---------------- Table Initialization and Resizing -------------- */
2073    
2074 tim 1.1 /**
2075 dl 1.119 * Returns a power of two table size for the given desired capacity.
2076     * See Hackers Delight, sec 3.2
2077 tim 1.1 */
2078 dl 1.119 private static final int tableSizeFor(int c) {
2079     int n = c - 1;
2080     n |= n >>> 1;
2081     n |= n >>> 2;
2082     n |= n >>> 4;
2083     n |= n >>> 8;
2084     n |= n >>> 16;
2085     return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
2086 tim 1.1 }
2087    
    /**
     * Initializes table, using the size recorded in sizeCtl.
     *
     * Exactly one thread wins the CAS of sizeCtl to -1 and creates the
     * array (using sizeCtl as the target capacity when positive, else
     * DEFAULT_CAPACITY); losers see sizeCtl < 0 and spin via yield
     * until the winner publishes the table. On success sizeCtl is
     * reset to the resize threshold n - n/4 (i.e. 0.75 * n).
     *
     * @return the initialized (or concurrently installed) table
     */
    private final Node[] initTable() {
        Node[] tab; int sc;
        while ((tab = table) == null) {
            if ((sc = sizeCtl) < 0)
                Thread.yield(); // lost initialization race; just spin
            else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
                try {
                    if ((tab = table) == null) { // recheck under "lock"
                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                        tab = table = new Node[n];
                        sc = n - (n >>> 2); // threshold = 0.75 * n
                    }
                } finally {
                    sizeCtl = sc;           // also restores sc on failure
                }
                break;
            }
        }
        return tab;
    }
2111    
    /**
     * If table is too small and not already resizing, creates next
     * table and transfers bins. Rechecks occupancy after a transfer
     * to see if another resize is already needed because resizings
     * are lagging additions.
     *
     * The loop re-tests the trigger (element count >= sizeCtl) after
     * each rebuild; claiming the resize is done by CASing sizeCtl to
     * -1, and the new threshold 1.5 * n (for the doubled table) is
     * written back in the finally block.
     */
    private final void checkForResize() {
        Node[] tab; int n, sc;
        while ((tab = table) != null &&
               (n = tab.length) < MAXIMUM_CAPACITY &&
               (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc &&
               UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
            try {
                if (tab == table) {         // recheck: nobody swapped it meanwhile
                    table = rebuild(tab);
                    sc = (n << 1) - (n >>> 1); // 0.75 * (2 * n)
                }
            } finally {
                sizeCtl = sc;
            }
        }
    }
2134    
    /**
     * Tries to presize table to accommodate the given number of elements.
     *
     * Targets a capacity of at least 1.5 * size + 1 rounded up to a
     * power of two. Repeatedly either creates the initial table or
     * doubles the current one (via rebuild), in both cases guarding
     * the transition by CASing sizeCtl to -1; gives up once the
     * current threshold already covers c or the table is at
     * MAXIMUM_CAPACITY.
     *
     * @param size number of elements (doesn't need to be perfectly accurate)
     */
    private final void tryPresize(int size) {
        int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
            tableSizeFor(size + (size >>> 1) + 1);
        int sc;
        while ((sc = sizeCtl) >= 0) {       // sc < 0 means someone else is resizing
            Node[] tab = table; int n;
            if (tab == null || (n = tab.length) == 0) {
                n = (sc > c) ? sc : c;      // honor any larger pending capacity
                if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
                    try {
                        if (table == tab) {
                            table = new Node[n];
                            sc = n - (n >>> 2); // threshold = 0.75 * n
                        }
                    } finally {
                        sizeCtl = sc;
                    }
                }
            }
            else if (c <= sc || n >= MAXIMUM_CAPACITY)
                break;                      // already big enough (or can't grow)
            else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
                try {
                    if (table == tab) {
                        table = rebuild(tab);
                        sc = (n << 1) - (n >>> 1); // 0.75 * (2 * n)
                    }
                } finally {
                    sizeCtl = sc;
                }
            }
        }
    }
2173    
    /*
     * Moves and/or copies the nodes in each bin to new table. See
     * above for explanation.
     *
     * Sweeps bins from high index to low. Each bin is claimed by
     * locking it (empty bins get a transiently locked forwarding
     * node), split into the new table via splitBin/splitTreeBin, and
     * then replaced by the shared forwarding node fwd so readers can
     * follow it into nextTab. Bins whose lock cannot be acquired are
     * parked in a fixed-size revisit buffer (with reverse-forwarding
     * place-holders installed in nextTab) and retried after the main
     * sweep; only when the buffer is full or exhausted does the
     * transfer block in tryAwaitLock.
     *
     * @return the new table
     */
    private static final Node[] rebuild(Node[] tab) {
        int n = tab.length;
        Node[] nextTab = new Node[n << 1];
        Node fwd = new Node(MOVED, nextTab, null, null);
        int[] buffer = null;       // holds bins to revisit; null until needed
        Node rev = null;           // reverse forwarder; null until needed
        int nbuffered = 0;         // the number of bins in buffer list
        int bufferIndex = 0;       // buffer index of current buffered bin
        int bin = n - 1;           // current non-buffered bin or -1 if none

        for (int i = bin;;) {      // start upwards sweep
            int fh; Node f;
            if ((f = tabAt(tab, i)) == null) {
                if (bin >= 0) {    // Unbuffered; no lock needed (or available)
                    if (!casTabAt(tab, i, f, fwd))
                        continue;
                }
                else {             // transiently use a locked forwarding node
                    Node g = new Node(MOVED|LOCKED, nextTab, null, null);
                    if (!casTabAt(tab, i, f, g))
                        continue;
                    // clear the reverse place-holders before exposing fwd
                    setTabAt(nextTab, i, null);
                    setTabAt(nextTab, i + n, null);
                    setTabAt(tab, i, fwd);
                    if (!g.casHash(MOVED|LOCKED, MOVED)) { // unlock and signal
                        g.hash = MOVED;
                        synchronized (g) { g.notifyAll(); }
                    }
                }
            }
            else if ((fh = f.hash) == MOVED) {
                Object fk = f.key;
                if (fk instanceof TreeBin) {
                    TreeBin t = (TreeBin)fk;
                    boolean validated = false;
                    t.acquire(0);
                    try {
                        if (tabAt(tab, i) == f) { // recheck under tree lock
                            validated = true;
                            splitTreeBin(nextTab, i, t);
                            setTabAt(tab, i, fwd);
                        }
                    } finally {
                        t.release(0);
                    }
                    if (!validated)
                        continue;
                }
            }
            else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) {
                boolean validated = false;
                try {              // split to lo and hi lists; copying as needed
                    if (tabAt(tab, i) == f) {
                        validated = true;
                        splitBin(nextTab, i, f);
                        setTabAt(tab, i, fwd);
                    }
                } finally {        // unlock and signal if needed
                    if (!f.casHash(fh | LOCKED, fh)) {
                        f.hash = fh;
                        synchronized (f) { f.notifyAll(); };
                    }
                }
                if (!validated)
                    continue;
            }
            else {                 // bin is locked by another thread
                if (buffer == null) // initialize buffer for revisits
                    buffer = new int[TRANSFER_BUFFER_SIZE];
                if (bin < 0 && bufferIndex > 0) {
                    int j = buffer[--bufferIndex];
                    buffer[bufferIndex] = i;
                    i = j;          // swap with another bin
                    continue;
                }
                if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) {
                    f.tryAwaitLock(tab, i);
                    continue;       // no other options -- block
                }
                if (rev == null)    // initialize reverse-forwarder
                    rev = new Node(MOVED, tab, null, null);
                if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0)
                    continue;       // recheck before adding to list
                buffer[nbuffered++] = i;
                setTabAt(nextTab, i, rev);  // install place-holders
                setTabAt(nextTab, i + n, rev);
            }

            if (bin > 0)            // main sweep continues downward
                i = --bin;
            else if (buffer != null && nbuffered > 0) { // then drain revisits
                bin = -1;
                i = buffer[bufferIndex = --nbuffered];
            }
            else
                return nextTab;
        }
    }
2278 tim 1.1
    /**
     * Splits a normal bin with list headed by e into lo and hi parts;
     * installs in given table.
     *
     * Nodes go to index i or i + bit in nextTab depending on the new
     * high hash bit. The first pass finds lastRun, the start of the
     * longest trailing sub-list whose nodes all land in the same half;
     * that suffix is reused as-is (safe because nodes ahead of it are
     * copied, never mutated, so concurrent readers of the old list are
     * unaffected). The second pass clones the remaining leading nodes
     * onto the appropriate half in reverse order.
     */
    private static void splitBin(Node[] nextTab, int i, Node e) {
        int bit = nextTab.length >>> 1; // bit to split on
        int runBit = e.hash & bit;
        Node lastRun = e, lo = null, hi = null;
        for (Node p = e.next; p != null; p = p.next) {
            int b = p.hash & bit;
            if (b != runBit) {
                runBit = b;
                lastRun = p;
            }
        }
        if (runBit == 0)                // reuse trailing run in one half
            lo = lastRun;
        else
            hi = lastRun;
        for (Node p = e; p != lastRun; p = p.next) {
            int ph = p.hash & HASH_BITS; // strip control bits when copying
            Object pk = p.key, pv = p.val;
            if ((ph & bit) == 0)
                lo = new Node(ph, pk, pv, lo);
            else
                hi = new Node(ph, pk, pv, hi);
        }
        setTabAt(nextTab, i, lo);
        setTabAt(nextTab, i + bit, hi);
    }
2309    
    /**
     * Splits a tree bin into lo and hi parts; installs in given table.
     *
     * Walks the bin's linked node list, routing each entry to a new lo
     * or hi TreeBin by the split bit. A half whose element count has
     * dropped to TREE_THRESHOLD/2 or below is demoted back to a plain
     * linked list instead of keeping the tree.
     */
    private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) {
        int bit = nextTab.length >>> 1; // bit to split on
        TreeBin lt = new TreeBin();
        TreeBin ht = new TreeBin();
        int lc = 0, hc = 0;             // element counts of each half
        for (Node e = t.first; e != null; e = e.next) {
            int h = e.hash & HASH_BITS;
            Object k = e.key, v = e.val;
            if ((h & bit) == 0) {
                ++lc;
                lt.putTreeNode(h, k, v);
            }
            else {
                ++hc;
                ht.putTreeNode(h, k, v);
            }
        }
        Node ln, hn; // throw away trees if too small
        if (lc <= (TREE_THRESHOLD >>> 1)) {
            ln = null;                  // rebuild as plain list (reverse order)
            for (Node p = lt.first; p != null; p = p.next)
                ln = new Node(p.hash, p.key, p.val, ln);
        }
        else
            ln = new Node(MOVED, lt, null, null); // TreeBin wrapper node
        setTabAt(nextTab, i, ln);
        if (hc <= (TREE_THRESHOLD >>> 1)) {
            hn = null;
            for (Node p = ht.first; p != null; p = p.next)
                hn = new Node(p.hash, p.key, p.val, hn);
        }
        else
            hn = new Node(MOVED, ht, null, null);
        setTabAt(nextTab, i + bit, hn);
    }
2348    
    /**
     * Implementation for clear. Steps through each bin, removing all
     * nodes.
     *
     * Each bin is handled under the appropriate lock (TreeBin lock or
     * the head node's LOCKED bit); deletions are accumulated as a
     * negative delta and pushed to the counter opportunistically
     * before blocking and once at the end. Forwarding nodes are
     * followed into the new table, so a concurrent resize does not
     * leave bins uncleared.
     */
    private final void internalClear() {
        long delta = 0L;                // negative number of deletions
        int i = 0;
        Node[] tab = table;
        while (tab != null && i < tab.length) {
            int fh; Object fk;
            Node f = tabAt(tab, i);
            if (f == null)
                ++i;                    // empty bin: nothing to do
            else if ((fh = f.hash) == MOVED) {
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin t = (TreeBin)fk;
                    t.acquire(0);
                    try {
                        if (tabAt(tab, i) == f) { // recheck under tree lock
                            for (Node p = t.first; p != null; p = p.next) {
                                if (p.val != null) { // (currently always true)
                                    p.val = null;
                                    --delta;
                                }
                            }
                            t.first = null;
                            t.root = null;
                            ++i;
                        }
                    } finally {
                        t.release(0);
                    }
                }
                else                    // forwarding node: follow to new table
                    tab = (Node[])fk;
            }
            else if ((fh & LOCKED) != 0) {
                counter.add(delta);     // opportunistically update count
                delta = 0L;
                f.tryAwaitLock(tab, i);
            }
            else if (f.casHash(fh, fh | LOCKED)) {
                try {
                    if (tabAt(tab, i) == f) {
                        for (Node e = f; e != null; e = e.next) {
                            if (e.val != null) { // (currently always true)
                                e.val = null;
                                --delta;
                            }
                        }
                        setTabAt(tab, i, null);
                        ++i;
                    }
                } finally {             // unlock and signal if needed
                    if (!f.casHash(fh | LOCKED, fh)) {
                        f.hash = fh;
                        synchronized (f) { f.notifyAll(); };
                    }
                }
            }
        }
        if (delta != 0)
            counter.add(delta);
    }
2413    
    /* ---------------- Table Traversal -------------- */
2415    
    /**
     * Encapsulates traversal for methods such as containsValue; also
     * serves as a base class for other iterators and bulk tasks.
     *
     * At each step, the iterator snapshots the key ("nextKey") and
     * value ("nextVal") of a valid node (i.e., one that, at point of
     * snapshot, has a non-null user value). Because val fields can
     * change (including to null, indicating deletion), field nextVal
     * might not be accurate at point of use, but still maintains the
     * weak consistency property of holding a value that was once
     * valid. To support iterator.remove, the nextKey field is not
     * updated (nulled out) when the iterator cannot advance.
     *
     * Internal traversals directly access these fields, as in:
     * {@code while (it.advance() != null) { process(it.nextKey); }}
     *
     * Exported iterators must track whether the iterator has advanced
     * (in hasNext vs next) (by setting/checking/nulling field
     * nextVal), and then extract key, value, or key-value pairs as
     * return values of next().
     *
     * The iterator visits once each still-valid node that was
     * reachable upon iterator construction. It might miss some that
     * were added to a bin after the bin was visited, which is OK wrt
     * consistency guarantees. Maintaining this property in the face
     * of possible ongoing resizes requires a fair amount of
     * bookkeeping state that is difficult to optimize away amidst
     * volatile accesses. Even so, traversal maintains reasonable
     * throughput.
     *
     * Normally, iteration proceeds bin-by-bin traversing lists.
     * However, if the table has been resized, then all future steps
     * must traverse both the bin at the current index as well as at
     * (index + baseSize); and so on for further resizings. To
     * paranoically cope with potential sharing by users of iterators
     * across threads, iteration terminates if a bounds checks fails
     * for a table read.
     *
     * This class extends ForkJoinTask to streamline parallel
     * iteration in bulk operations (see BulkTask). This adds only an
     * int of space overhead, which is close enough to negligible in
     * cases where it is not needed to not worry about it. Because
     * ForkJoinTask is Serializable, but iterators need not be, we
     * need to add warning suppressions.
     */
    @SuppressWarnings("serial") static class Traverser<K,V,R> extends ForkJoinTask<R> {
        final ConcurrentHashMap<K, V> map;
        Node next;           // the next entry to use
        Object nextKey;      // cached key field of next
        Object nextVal;      // cached val field of next
        Node[] tab;          // current table; updated if resized
        int index;           // index of bin to use next
        int baseIndex;       // current index of initial table
        int baseLimit;       // index bound for initial table
        int baseSize;        // initial table size

        /** Creates iterator for all entries in the table. */
        Traverser(ConcurrentHashMap<K, V> map) {
            this.map = map;  // tab/limits are filled in lazily by advance()
        }

        /** Creates iterator for split() methods */
        Traverser(Traverser<K,V,?> it) {
            ConcurrentHashMap<K, V> m; Node[] t;
            if ((m = this.map = it.map) == null)
                t = null;
            else if ((t = it.tab) == null && // force parent tab initialization
                     (t = it.tab = m.table) != null)
                it.baseLimit = it.baseSize = t.length;
            this.tab = t;
            this.baseSize = it.baseSize;
            // this traverser takes the upper half of the parent's remaining
            // range; the parent's limit shrinks to the midpoint
            it.baseLimit = this.index = this.baseIndex =
                ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1;
        }

        /**
         * Advances next; returns nextVal or null if terminated.
         * See above for explanation.
         */
        final Object advance() {
            Node e = next;
            Object ev = null;
            outer: do {
                if (e != null)              // advance past used/skipped node
                    e = e.next;
                while (e == null) {         // get to next non-null bin
                    ConcurrentHashMap<K, V> m;
                    Node[] t; int b, i, n; Object ek; // checks must use locals
                    if ((t = tab) != null)
                        n = t.length;
                    else if ((m = map) != null && (t = tab = m.table) != null)
                        n = baseLimit = baseSize = t.length;
                    else
                        break outer;        // no table: traversal exhausted
                    if ((b = baseIndex) >= baseLimit ||
                        (i = index) < 0 || i >= n)
                        break outer;        // range done (or stale bounds)
                    if ((e = tabAt(t, i)) != null && e.hash == MOVED) {
                        if ((ek = e.key) instanceof TreeBin)
                            e = ((TreeBin)ek).first; // tree bin: walk its list
                        else {
                            tab = (Node[])ek;        // forwarded: switch tables
                            continue; // restarts due to null val
                        }
                    }                       // visit upper slots if present
                    index = (i += baseSize) < n ? i : (baseIndex = b + 1);
                }
                nextKey = e.key;
            } while ((ev = e.val) == null); // skip deleted or special nodes
            next = e;
            return nextVal = ev;
        }

        /** Removes the last returned entry, advancing first if necessary. */
        public final void remove() {
            Object k = nextKey;
            if (k == null && (advance() == null || (k = nextKey) == null))
                throw new IllegalStateException();
            map.internalReplace(k, null, null);
        }

        public final boolean hasNext() {
            return nextVal != null || advance() != null;
        }

        public final boolean hasMoreElements() { return hasNext(); }

        // ForkJoinTask plumbing: traversal state is the "result"
        public final void setRawResult(Object x) { }
        public R getRawResult() { return null; }
        public boolean exec() { return true; }
    }
2545    
2546     /* ---------------- Public operations -------------- */
2547    
    /**
     * Creates a new, empty map with the default initial table size (16).
     */
    public ConcurrentHashMap() {
        // the table itself is created lazily, on first insertion
        this.counter = new LongAdder();
    }
2554    
    /**
     * Creates a new, empty map with an initial table size
     * accommodating the specified number of elements without the need
     * to dynamically resize.
     *
     * @param initialCapacity The implementation performs internal
     * sizing to accommodate this many elements.
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative
     */
    public ConcurrentHashMap(int initialCapacity) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException();
        // size for initialCapacity elements at the 0.75 threshold:
        // 1.5 * initialCapacity + 1, rounded up to a power of two
        int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
                   MAXIMUM_CAPACITY :
                   tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
        this.counter = new LongAdder();
        this.sizeCtl = cap; // initTable reads this as the target capacity
    }
2574    
2575     /**
2576     * Creates a new map with the same mappings as the given map.
2577     *
2578     * @param m the map
2579     */
    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
        this.counter = new LongAdder();
        this.sizeCtl = DEFAULT_CAPACITY; // start at default size; bulk insert resizes as needed
        internalPutAll(m);
    }
2585    
2586     /**
2587     * Creates a new, empty map with an initial table size based on
2588     * the given number of elements ({@code initialCapacity}) and
2589     * initial table density ({@code loadFactor}).
2590     *
2591     * @param initialCapacity the initial capacity. The implementation
2592     * performs internal sizing to accommodate this many elements,
2593     * given the specified load factor.
2594     * @param loadFactor the load factor (table density) for
2595     * establishing the initial table size
2596     * @throws IllegalArgumentException if the initial capacity of
2597     * elements is negative or the load factor is nonpositive
2598     *
2599     * @since 1.6
2600     */
    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
        // Delegate with a concurrencyLevel of 1; the value is only a sizing hint.
        this(initialCapacity, loadFactor, 1);
    }
2604    
2605     /**
2606     * Creates a new, empty map with an initial table size based on
2607     * the given number of elements ({@code initialCapacity}), table
2608     * density ({@code loadFactor}), and number of concurrently
2609     * updating threads ({@code concurrencyLevel}).
2610     *
2611     * @param initialCapacity the initial capacity. The implementation
2612     * performs internal sizing to accommodate this many elements,
2613     * given the specified load factor.
2614     * @param loadFactor the load factor (table density) for
2615     * establishing the initial table size
2616     * @param concurrencyLevel the estimated number of concurrently
2617     * updating threads. The implementation may use this value as
2618     * a sizing hint.
2619     * @throws IllegalArgumentException if the initial capacity is
2620     * negative or the load factor or concurrencyLevel are
2621     * nonpositive
2622     */
2623     public ConcurrentHashMap(int initialCapacity,
2624     float loadFactor, int concurrencyLevel) {
2625     if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
2626     throw new IllegalArgumentException();
2627     if (initialCapacity < concurrencyLevel) // Use at least as many bins
2628     initialCapacity = concurrencyLevel; // as estimated threads
2629     long size = (long)(1.0 + (long)initialCapacity / loadFactor);
2630     int cap = (size >= (long)MAXIMUM_CAPACITY) ?
2631     MAXIMUM_CAPACITY : tableSizeFor((int)size);
2632     this.counter = new LongAdder();
2633     this.sizeCtl = cap;
2634     }
2635    
2636     /**
2637 dl 1.137 * Creates a new {@link Set} backed by a ConcurrentHashMap
2638     * from the given type to {@code Boolean.TRUE}.
2639     *
2640     * @return the new set
2641     */
    public static <K> KeySetView<K,Boolean> newKeySet() {
        // All additions through the returned set map to Boolean.TRUE.
        return new KeySetView<K,Boolean>(new ConcurrentHashMap<K,Boolean>(),
                                         Boolean.TRUE);
    }
2646    
2647     /**
2648     * Creates a new {@link Set} backed by a ConcurrentHashMap
2649     * from the given type to {@code Boolean.TRUE}.
2650     *
2651     * @param initialCapacity The implementation performs internal
2652     * sizing to accommodate this many elements.
2653     * @throws IllegalArgumentException if the initial capacity of
2654     * elements is negative
2655     * @return the new set
2656     */
    public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
        // All additions through the returned set map to Boolean.TRUE.
        return new KeySetView<K,Boolean>(new ConcurrentHashMap<K,Boolean>(initialCapacity),
                                         Boolean.TRUE);
    }
2661    
2662     /**
2663 dl 1.119 * {@inheritDoc}
2664     */
    public boolean isEmpty() {
        // Sum over the striped counter; a concurrent removal racing an
        // insertion can make the sum transiently negative, so use <=.
        return counter.sum() <= 0L; // ignore transient negative values
    }
2668    
2669     /**
2670     * {@inheritDoc}
2671     */
2672     public int size() {
2673     long n = counter.sum();
2674     return ((n < 0L) ? 0 :
2675     (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
2676     (int)n);
2677     }
2678    
2679     /**
2680     * Returns the number of mappings. This method should be used
2681     * instead of {@link #size} because a ConcurrentHashMap may
2682     * contain more mappings than can be represented as an int. The
2683     * value returned is a snapshot; the actual count may differ if
2684 dl 1.127 * there are ongoing concurrent insertions or removals.
2685 dl 1.119 *
2686     * @return the number of mappings
2687     */
    public long mappingCount() {
        // Unlike size(), no upper clamp is needed: the count fits in a long.
        long n = counter.sum();
        return (n < 0L) ? 0L : n; // ignore transient negative values
    }
2692    
2693     /**
2694     * Returns the value to which the specified key is mapped,
2695     * or {@code null} if this map contains no mapping for the key.
2696     *
2697     * <p>More formally, if this map contains a mapping from a key
2698     * {@code k} to a value {@code v} such that {@code key.equals(k)},
2699     * then this method returns {@code v}; otherwise it returns
2700     * {@code null}. (There can be at most one such mapping.)
2701     *
2702     * @throws NullPointerException if the specified key is null
2703     */
2704 dl 1.128 @SuppressWarnings("unchecked") public V get(Object key) {
2705 dl 1.119 if (key == null)
2706     throw new NullPointerException();
2707     return (V)internalGet(key);
2708     }
2709    
2710     /**
2711 dl 1.129 * Returns the value to which the specified key is mapped,
2712 jsr166 1.133 * or the given defaultValue if this map contains no mapping for the key.
2713 dl 1.129 *
2714     * @param key the key
2715     * @param defaultValue the value to return if this map contains
2716 jsr166 1.134 * no mapping for the given key
2717 dl 1.129 * @return the mapping for the key, if present; else the defaultValue
2718     * @throws NullPointerException if the specified key is null
2719     */
2720     @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) {
2721     if (key == null)
2722     throw new NullPointerException();
2723     V v = (V) internalGet(key);
2724     return v == null ? defaultValue : v;
2725     }
2726    
2727     /**
2728 dl 1.119 * Tests if the specified object is a key in this table.
2729     *
2730     * @param key possible key
2731     * @return {@code true} if and only if the specified object
2732     * is a key in this table, as determined by the
2733     * {@code equals} method; {@code false} otherwise
2734     * @throws NullPointerException if the specified key is null
2735     */
2736     public boolean containsKey(Object key) {
2737     if (key == null)
2738     throw new NullPointerException();
2739     return internalGet(key) != null;
2740     }
2741    
2742     /**
2743     * Returns {@code true} if this map maps one or more keys to the
2744     * specified value. Note: This method may require a full traversal
2745     * of the map, and is much slower than method {@code containsKey}.
2746     *
2747     * @param value value whose presence in this map is to be tested
2748     * @return {@code true} if this map maps one or more keys to the
2749     * specified value
2750     * @throws NullPointerException if the specified value is null
2751     */
2752     public boolean containsValue(Object value) {
2753     if (value == null)
2754     throw new NullPointerException();
2755     Object v;
2756     Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2757     while ((v = it.advance()) != null) {
2758     if (v == value || value.equals(v))
2759     return true;
2760     }
2761     return false;
2762     }
2763    
2764     /**
2765     * Legacy method testing if some key maps into the specified value
2766     * in this table. This method is identical in functionality to
2767     * {@link #containsValue}, and exists solely to ensure
2768     * full compatibility with class {@link java.util.Hashtable},
2769     * which supported this method prior to introduction of the
2770     * Java Collections framework.
2771     *
2772     * @param value a value to search for
2773     * @return {@code true} if and only if some key maps to the
2774     * {@code value} argument in this table as
2775     * determined by the {@code equals} method;
2776     * {@code false} otherwise
2777     * @throws NullPointerException if the specified value is null
2778     */
    public boolean contains(Object value) {
        // Legacy Hashtable method: despite the name, it tests values, not keys.
        return containsValue(value);
    }
2782    
2783     /**
2784     * Maps the specified key to the specified value in this table.
2785     * Neither the key nor the value can be null.
2786     *
2787     * <p> The value can be retrieved by calling the {@code get} method
2788     * with a key that is equal to the original key.
2789     *
2790     * @param key key with which the specified value is to be associated
2791     * @param value value to be associated with the specified key
2792     * @return the previous value associated with {@code key}, or
2793     * {@code null} if there was no mapping for {@code key}
2794     * @throws NullPointerException if the specified key or value is null
2795     */
2796 dl 1.128 @SuppressWarnings("unchecked") public V put(K key, V value) {
2797 dl 1.119 if (key == null || value == null)
2798     throw new NullPointerException();
2799     return (V)internalPut(key, value);
2800     }
2801    
2802     /**
2803     * {@inheritDoc}
2804     *
2805     * @return the previous value associated with the specified key,
2806     * or {@code null} if there was no mapping for the key
2807     * @throws NullPointerException if the specified key or value is null
2808     */
2809 dl 1.128 @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) {
2810 dl 1.119 if (key == null || value == null)
2811     throw new NullPointerException();
2812     return (V)internalPutIfAbsent(key, value);
2813     }
2814    
2815     /**
2816     * Copies all of the mappings from the specified map to this one.
2817     * These mappings replace any mappings that this map had for any of the
2818     * keys currently in the specified map.
2819     *
2820     * @param m mappings to be stored in this map
2821     */
    public void putAll(Map<? extends K, ? extends V> m) {
        // Delegates to the internal bulk-insertion routine.
        internalPutAll(m);
    }
2825    
2826     /**
2827     * If the specified key is not already associated with a value,
2828     * computes its value using the given mappingFunction and enters
2829     * it into the map unless null. This is equivalent to
2830     * <pre> {@code
2831     * if (map.containsKey(key))
2832     * return map.get(key);
2833     * value = mappingFunction.apply(key);
2834     * if (value != null)
2835     * map.put(key, value);
2836     * return value;}</pre>
2837     *
2838     * except that the action is performed atomically. If the
2839     * function returns {@code null} no mapping is recorded. If the
2840     * function itself throws an (unchecked) exception, the exception
2841     * is rethrown to its caller, and no mapping is recorded. Some
2842     * attempted update operations on this map by other threads may be
2843     * blocked while computation is in progress, so the computation
2844     * should be short and simple, and must not attempt to update any
2845     * other mappings of this Map. The most appropriate usage is to
2846     * construct a new object serving as an initial mapped value, or
2847     * memoized result, as in:
2848     *
2849     * <pre> {@code
2850     * map.computeIfAbsent(key, new Fun<K, V>() {
2851     * public V map(K k) { return new Value(f(k)); }});}</pre>
2852     *
2853     * @param key key with which the specified value is to be associated
2854     * @param mappingFunction the function to compute a value
2855     * @return the current (existing or computed) value associated with
2856 jsr166 1.134 * the specified key, or null if the computed value is null
2857 dl 1.119 * @throws NullPointerException if the specified key or mappingFunction
2858     * is null
2859     * @throws IllegalStateException if the computation detectably
2860     * attempts a recursive update to this map that would
2861     * otherwise never complete
2862     * @throws RuntimeException or Error if the mappingFunction does so,
2863     * in which case the mapping is left unestablished
2864     */
2865 dl 1.128 @SuppressWarnings("unchecked") public V computeIfAbsent
2866     (K key, Fun<? super K, ? extends V> mappingFunction) {
2867 dl 1.119 if (key == null || mappingFunction == null)
2868     throw new NullPointerException();
2869     return (V)internalComputeIfAbsent(key, mappingFunction);
2870     }
2871    
2872     /**
2873     * If the given key is present, computes a new mapping value given a key and
2874     * its current mapped value. This is equivalent to
2875     * <pre> {@code
2876     * if (map.containsKey(key)) {
2877     * value = remappingFunction.apply(key, map.get(key));
2878     * if (value != null)
2879     * map.put(key, value);
2880     * else
2881     * map.remove(key);
2882     * }
2883     * }</pre>
2884     *
2885     * except that the action is performed atomically. If the
2886     * function returns {@code null}, the mapping is removed. If the
2887     * function itself throws an (unchecked) exception, the exception
2888     * is rethrown to its caller, and the current mapping is left
2889     * unchanged. Some attempted update operations on this map by
2890     * other threads may be blocked while computation is in progress,
2891     * so the computation should be short and simple, and must not
     * attempt to update any other mappings of this Map.
2894     *
2895     * @param key key with which the specified value is to be associated
2896     * @param remappingFunction the function to compute a value
2897 jsr166 1.123 * @return the new value associated with the specified key, or null if none
2898 dl 1.119 * @throws NullPointerException if the specified key or remappingFunction
2899     * is null
2900     * @throws IllegalStateException if the computation detectably
2901     * attempts a recursive update to this map that would
2902     * otherwise never complete
2903     * @throws RuntimeException or Error if the remappingFunction does so,
2904     * in which case the mapping is unchanged
2905     */
2906 dl 1.128 @SuppressWarnings("unchecked") public V computeIfPresent
2907     (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2908 dl 1.119 if (key == null || remappingFunction == null)
2909     throw new NullPointerException();
2910     return (V)internalCompute(key, true, remappingFunction);
2911     }
2912    
2913     /**
2914     * Computes a new mapping value given a key and
2915     * its current mapped value (or {@code null} if there is no current
2916     * mapping). This is equivalent to
2917     * <pre> {@code
2918     * value = remappingFunction.apply(key, map.get(key));
2919     * if (value != null)
2920     * map.put(key, value);
2921     * else
2922     * map.remove(key);
2923     * }</pre>
2924     *
2925     * except that the action is performed atomically. If the
2926     * function returns {@code null}, the mapping is removed. If the
2927     * function itself throws an (unchecked) exception, the exception
2928     * is rethrown to its caller, and the current mapping is left
2929     * unchanged. Some attempted update operations on this map by
2930     * other threads may be blocked while computation is in progress,
2931     * so the computation should be short and simple, and must not
2932     * attempt to update any other mappings of this Map. For example,
2933     * to either create or append new messages to a value mapping:
2934     *
2935     * <pre> {@code
2936     * Map<Key, String> map = ...;
2937     * final String msg = ...;
2938     * map.compute(key, new BiFun<Key, String, String>() {
2939     * public String apply(Key k, String v) {
2940     * return (v == null) ? msg : v + msg;});}}</pre>
2941     *
2942     * @param key key with which the specified value is to be associated
2943     * @param remappingFunction the function to compute a value
2944 jsr166 1.123 * @return the new value associated with the specified key, or null if none
2945 dl 1.119 * @throws NullPointerException if the specified key or remappingFunction
2946     * is null
2947     * @throws IllegalStateException if the computation detectably
2948     * attempts a recursive update to this map that would
2949     * otherwise never complete
2950     * @throws RuntimeException or Error if the remappingFunction does so,
2951     * in which case the mapping is unchanged
2952     */
2953 dl 1.128 @SuppressWarnings("unchecked") public V compute
2954     (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2955 dl 1.119 if (key == null || remappingFunction == null)
2956     throw new NullPointerException();
2957     return (V)internalCompute(key, false, remappingFunction);
2958     }
2959    
2960     /**
2961     * If the specified key is not already associated
2962     * with a value, associate it with the given value.
2963     * Otherwise, replace the value with the results of
2964     * the given remapping function. This is equivalent to:
2965     * <pre> {@code
2966     * if (!map.containsKey(key))
     * map.put(key, value);
     * else {
     *   newValue = remappingFunction.apply(map.get(key), value);
     *   if (newValue != null)
     *     map.put(key, newValue);
     *   else
     *     map.remove(key);
2974     * }
2975     * }</pre>
2976     * except that the action is performed atomically. If the
2977     * function returns {@code null}, the mapping is removed. If the
2978     * function itself throws an (unchecked) exception, the exception
2979     * is rethrown to its caller, and the current mapping is left
2980     * unchanged. Some attempted update operations on this map by
2981     * other threads may be blocked while computation is in progress,
2982     * so the computation should be short and simple, and must not
2983     * attempt to update any other mappings of this Map.
2984     */
2985 dl 1.128 @SuppressWarnings("unchecked") public V merge
2986     (K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
2987 dl 1.119 if (key == null || value == null || remappingFunction == null)
2988     throw new NullPointerException();
2989     return (V)internalMerge(key, value, remappingFunction);
2990     }
2991    
2992     /**
2993     * Removes the key (and its corresponding value) from this map.
2994     * This method does nothing if the key is not in the map.
2995     *
2996     * @param key the key that needs to be removed
2997     * @return the previous value associated with {@code key}, or
2998     * {@code null} if there was no mapping for {@code key}
2999     * @throws NullPointerException if the specified key is null
3000     */
3001 dl 1.128 @SuppressWarnings("unchecked") public V remove(Object key) {
3002 dl 1.119 if (key == null)
3003     throw new NullPointerException();
3004     return (V)internalReplace(key, null, null);
3005     }
3006    
3007     /**
3008     * {@inheritDoc}
3009     *
3010     * @throws NullPointerException if the specified key is null
3011     */
3012     public boolean remove(Object key, Object value) {
3013     if (key == null)
3014     throw new NullPointerException();
3015     if (value == null)
3016     return false;
3017     return internalReplace(key, null, value) != null;
3018 tim 1.1 }
3019 dl 1.31
3020     /**
3021 jsr166 1.68 * {@inheritDoc}
3022     *
3023     * @throws NullPointerException if any of the arguments are null
3024 dl 1.31 */
3025     public boolean replace(K key, V oldValue, V newValue) {
3026 dl 1.119 if (key == null || oldValue == null || newValue == null)
3027 dl 1.31 throw new NullPointerException();
3028 dl 1.119 return internalReplace(key, newValue, oldValue) != null;
3029 dl 1.32 }
3030    
3031     /**
3032 jsr166 1.68 * {@inheritDoc}
3033     *
3034     * @return the previous value associated with the specified key,
3035 dl 1.119 * or {@code null} if there was no mapping for the key
3036 jsr166 1.68 * @throws NullPointerException if the specified key or value is null
3037 dl 1.32 */
3038 dl 1.128 @SuppressWarnings("unchecked") public V replace(K key, V value) {
3039 dl 1.119 if (key == null || value == null)
3040 dl 1.32 throw new NullPointerException();
3041 dl 1.119 return (V)internalReplace(key, value, null);
3042 dl 1.31 }
3043    
3044 tim 1.1 /**
3045 jsr166 1.68 * Removes all of the mappings from this map.
3046 tim 1.1 */
    public void clear() {
        // Delegates to the internal table-clearing routine.
        internalClear();
    }
3050    
3051     /**
3052 jsr166 1.68 * Returns a {@link Set} view of the keys contained in this map.
3053     * The set is backed by the map, so changes to the map are
3054 dl 1.137 * reflected in the set, and vice-versa.
3055     *
3056     * @return the set view
3057     */
3058     public KeySetView<K,V> keySet() {
3059     KeySetView<K,V> ks = keySet;
3060     return (ks != null) ? ks : (keySet = new KeySetView<K,V>(this, null));
3061     }
3062    
3063     /**
3064     * Returns a {@link Set} view of the keys in this map, using the
3065     * given common mapped value for any additions (i.e., {@link
3066     * Collection#add} and {@link Collection#addAll}). This is of
3067     * course only appropriate if it is acceptable to use the same
3068     * value for all additions from this view.
3069 jsr166 1.68 *
3070 dl 1.137 * @param mappedValue the mapped value to use for any
3071     * additions.
3072     * @return the set view
3073     * @throws NullPointerException if the mappedValue is null
3074 tim 1.1 */
3075 dl 1.137 public KeySetView<K,V> keySet(V mappedValue) {
3076     if (mappedValue == null)
3077     throw new NullPointerException();
3078     return new KeySetView<K,V>(this, mappedValue);
3079 tim 1.1 }
3080    
3081     /**
3082 jsr166 1.68 * Returns a {@link Collection} view of the values contained in this map.
3083     * The collection is backed by the map, so changes to the map are
3084     * reflected in the collection, and vice-versa. The collection
3085     * supports element removal, which removes the corresponding
3086 dl 1.119 * mapping from this map, via the {@code Iterator.remove},
3087     * {@code Collection.remove}, {@code removeAll},
3088     * {@code retainAll}, and {@code clear} operations. It does not
3089     * support the {@code add} or {@code addAll} operations.
3090 jsr166 1.68 *
3091 dl 1.119 * <p>The view's {@code iterator} is a "weakly consistent" iterator
3092 jsr166 1.68 * that will never throw {@link ConcurrentModificationException},
3093 dl 1.14 * and guarantees to traverse elements as they existed upon
3094     * construction of the iterator, and may (but is not guaranteed to)
3095     * reflect any modifications subsequent to construction.
3096 tim 1.1 */
3097 dl 1.119 public Collection<V> values() {
3098     Values<K,V> vs = values;
3099     return (vs != null) ? vs : (values = new Values<K,V>(this));
3100     }
3101    
3102     /**
3103     * Returns a {@link Set} view of the mappings contained in this map.
3104     * The set is backed by the map, so changes to the map are
3105     * reflected in the set, and vice-versa. The set supports element
3106     * removal, which removes the corresponding mapping from the map,
3107     * via the {@code Iterator.remove}, {@code Set.remove},
3108     * {@code removeAll}, {@code retainAll}, and {@code clear}
3109     * operations. It does not support the {@code add} or
3110     * {@code addAll} operations.
3111     *
3112     * <p>The view's {@code iterator} is a "weakly consistent" iterator
3113     * that will never throw {@link ConcurrentModificationException},
3114     * and guarantees to traverse elements as they existed upon
3115     * construction of the iterator, and may (but is not guaranteed to)
3116     * reflect any modifications subsequent to construction.
3117     */
3118     public Set<Map.Entry<K,V>> entrySet() {
3119     EntrySet<K,V> es = entrySet;
3120     return (es != null) ? es : (entrySet = new EntrySet<K,V>(this));
3121     }
3122    
3123     /**
3124     * Returns an enumeration of the keys in this table.
3125     *
3126     * @return an enumeration of the keys in this table
3127     * @see #keySet()
3128     */
    public Enumeration<K> keys() {
        // KeyIterator implements Enumeration<K>.
        return new KeyIterator<K,V>(this);
    }
3132    
3133     /**
3134     * Returns an enumeration of the values in this table.
3135     *
3136     * @return an enumeration of the values in this table
3137     * @see #values()
3138     */
    public Enumeration<V> elements() {
        // ValueIterator implements Enumeration<V>.
        return new ValueIterator<K,V>(this);
    }
3142    
3143     /**
3144 jsr166 1.122 * Returns a partitionable iterator of the keys in this map.
3145 dl 1.119 *
3146 jsr166 1.122 * @return a partitionable iterator of the keys in this map
3147 dl 1.119 */
    public Spliterator<K> keySpliterator() {
        // KeyIterator also implements the (local) Spliterator interface.
        return new KeyIterator<K,V>(this);
    }
3151    
3152     /**
3153 jsr166 1.122 * Returns a partitionable iterator of the values in this map.
3154 dl 1.119 *
3155 jsr166 1.122 * @return a partitionable iterator of the values in this map
3156 dl 1.119 */
    public Spliterator<V> valueSpliterator() {
        // ValueIterator also implements the (local) Spliterator interface.
        return new ValueIterator<K,V>(this);
    }
3160    
3161     /**
3162 jsr166 1.122 * Returns a partitionable iterator of the entries in this map.
3163 dl 1.119 *
3164 jsr166 1.122 * @return a partitionable iterator of the entries in this map
3165 dl 1.119 */
    public Spliterator<Map.Entry<K,V>> entrySpliterator() {
        // EntryIterator also implements the (local) Spliterator interface.
        return new EntryIterator<K,V>(this);
    }
3169    
3170     /**
3171     * Returns the hash code value for this {@link Map}, i.e.,
3172     * the sum of, for each key-value pair in the map,
3173     * {@code key.hashCode() ^ value.hashCode()}.
3174     *
3175     * @return the hash code value for this map
3176     */
3177     public int hashCode() {
3178     int h = 0;
3179     Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3180     Object v;
3181     while ((v = it.advance()) != null) {
3182     h += it.nextKey.hashCode() ^ v.hashCode();
3183     }
3184     return h;
3185     }
3186    
3187     /**
3188     * Returns a string representation of this map. The string
3189     * representation consists of a list of key-value mappings (in no
3190     * particular order) enclosed in braces ("{@code {}}"). Adjacent
3191     * mappings are separated by the characters {@code ", "} (comma
3192     * and space). Each key-value mapping is rendered as the key
3193     * followed by an equals sign ("{@code =}") followed by the
3194     * associated value.
3195     *
3196     * @return a string representation of this map
3197     */
3198     public String toString() {
3199     Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3200     StringBuilder sb = new StringBuilder();
3201     sb.append('{');
3202     Object v;
3203     if ((v = it.advance()) != null) {
3204     for (;;) {
3205     Object k = it.nextKey;
3206     sb.append(k == this ? "(this Map)" : k);
3207     sb.append('=');
3208     sb.append(v == this ? "(this Map)" : v);
3209     if ((v = it.advance()) == null)
3210     break;
3211     sb.append(',').append(' ');
3212     }
3213     }
3214     return sb.append('}').toString();
3215     }
3216    
3217     /**
3218     * Compares the specified object with this map for equality.
3219     * Returns {@code true} if the given object is a map with the same
3220     * mappings as this map. This operation may return misleading
3221     * results if either map is concurrently modified during execution
3222     * of this method.
3223     *
3224     * @param o object to be compared for equality with this map
3225     * @return {@code true} if the specified object is equal to this map
3226     */
    public boolean equals(Object o) {
        if (o != this) {
            if (!(o instanceof Map))
                return false;
            Map<?,?> m = (Map<?,?>) o;
            // Pass 1: every mapping in this map must be present in m
            // with an equal value.
            Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
            Object val;
            while ((val = it.advance()) != null) {
                Object v = m.get(it.nextKey);
                if (v == null || (v != val && !v.equals(val)))
                    return false;
            }
            // Pass 2: every mapping in m must be present here with an
            // equal value (null keys/values in m can never match).
            for (Map.Entry<?,?> e : m.entrySet()) {
                Object mk, mv, v;
                if ((mk = e.getKey()) == null ||
                    (mv = e.getValue()) == null ||
                    (v = internalGet(mk)) == null ||
                    (mv != v && !mv.equals(v)))
                    return false;
            }
        }
        return true;
    }
3250    
3251     /* ----------------Iterators -------------- */
3252    
    /**
     * Key iterator/spliterator/enumeration over a Traverser's key stream.
     */
    @SuppressWarnings("serial") static final class KeyIterator<K,V> extends Traverser<K,V,Object>
        implements Spliterator<K>, Enumeration<K> {
        KeyIterator(ConcurrentHashMap<K, V> map) { super(map); }
        KeyIterator(Traverser<K,V,Object> it) {
            super(it); // split constructor: shares and divides the source's range
        }
        public KeyIterator<K,V> split() {
            // Splitting is only permitted before iteration has started.
            if (nextKey != null)
                throw new IllegalStateException();
            return new KeyIterator<K,V>(this);
        }
        @SuppressWarnings("unchecked") public final K next() {
            if (nextVal == null && advance() == null)
                throw new NoSuchElementException();
            Object k = nextKey;
            nextVal = null; // mark element consumed so next call advances
            return (K) k;
        }

        public final K nextElement() { return next(); }
    }
3274    
    /**
     * Value iterator/spliterator/enumeration over a Traverser's value stream.
     */
    @SuppressWarnings("serial") static final class ValueIterator<K,V> extends Traverser<K,V,Object>
        implements Spliterator<V>, Enumeration<V> {
        ValueIterator(ConcurrentHashMap<K, V> map) { super(map); }
        ValueIterator(Traverser<K,V,Object> it) {
            super(it); // split constructor: shares and divides the source's range
        }
        public ValueIterator<K,V> split() {
            // Splitting is only permitted before iteration has started.
            if (nextKey != null)
                throw new IllegalStateException();
            return new ValueIterator<K,V>(this);
        }

        @SuppressWarnings("unchecked") public final V next() {
            Object v;
            if ((v = nextVal) == null && (v = advance()) == null)
                throw new NoSuchElementException();
            nextVal = null; // mark element consumed so next call advances
            return (V) v;
        }

        public final V nextElement() { return next(); }
    }
3297    
    /**
     * Entry iterator/spliterator; returns exported MapEntry snapshots.
     */
    @SuppressWarnings("serial") static final class EntryIterator<K,V> extends Traverser<K,V,Object>
        implements Spliterator<Map.Entry<K,V>> {
        EntryIterator(ConcurrentHashMap<K, V> map) { super(map); }
        EntryIterator(Traverser<K,V,Object> it) {
            super(it); // split constructor: shares and divides the source's range
        }
        public EntryIterator<K,V> split() {
            // Splitting is only permitted before iteration has started.
            if (nextKey != null)
                throw new IllegalStateException();
            return new EntryIterator<K,V>(this);
        }

        @SuppressWarnings("unchecked") public final Map.Entry<K,V> next() {
            Object v;
            if ((v = nextVal) == null && (v = advance()) == null)
                throw new NoSuchElementException();
            Object k = nextKey;
            nextVal = null; // mark element consumed so next call advances
            // Each returned entry is a write-through snapshot (see MapEntry).
            return new MapEntry<K,V>((K)k, (V)v, map);
        }
    }
3319    
3320     /**
3321     * Exported Entry for iterators
3322     */
3323     static final class MapEntry<K,V> implements Map.Entry<K, V> {
3324     final K key; // non-null
3325     V val; // non-null
3326     final ConcurrentHashMap<K, V> map;
3327     MapEntry(K key, V val, ConcurrentHashMap<K, V> map) {
3328     this.key = key;
3329     this.val = val;
3330     this.map = map;
3331     }
3332     public final K getKey() { return key; }
3333     public final V getValue() { return val; }
3334     public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
3335     public final String toString(){ return key + "=" + val; }
3336    
3337     public final boolean equals(Object o) {
3338     Object k, v; Map.Entry<?,?> e;
3339     return ((o instanceof Map.Entry) &&
3340     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3341     (v = e.getValue()) != null &&
3342     (k == key || k.equals(key)) &&
3343     (v == val || v.equals(val)));
3344     }
3345    
3346     /**
3347     * Sets our entry's value and writes through to the map. The
3348     * value to return is somewhat arbitrary here. Since we do not
3349     * necessarily track asynchronous changes, the most recent
3350     * "previous" value could be different from what we return (or
3351     * could even have been removed in which case the put will
3352     * re-establish). We do not and cannot guarantee more.
3353     */
3354     public final V setValue(V value) {
3355     if (value == null) throw new NullPointerException();
3356     V v = val;
3357     val = value;
3358     map.put(key, value);
3359     return v;
3360     }
3361     }
3362    
    /* ----------------Views -------------- */

    /**
     * Base class for views. A view is a lightweight wrapper around the
     * backing map; all bulk operations here are implemented in terms of
     * the iterator supplied by the concrete subclass.
     */
    static abstract class CHMView<K, V> {
        final ConcurrentHashMap<K, V> map;   // the backing map
        CHMView(ConcurrentHashMap<K, V> map) { this.map = map; }
        public final int size() { return map.size(); }
        public final boolean isEmpty() { return map.isEmpty(); }
        public final void clear() { map.clear(); }

        // implementations below rely on concrete classes supplying these
        abstract public Iterator<?> iterator();
        abstract public boolean contains(Object o);
        abstract public boolean remove(Object o);

        private static final String oomeMsg = "Required array size too large";

        public final Object[] toArray() {
            // mappingCount is only an estimate; the loop below regrows
            // the array if concurrent insertions outpace it.
            long sz = map.mappingCount();
            if (sz > (long)(MAX_ARRAY_SIZE))
                throw new OutOfMemoryError(oomeMsg);
            int n = (int)sz;
            Object[] r = new Object[n];
            int i = 0;
            Iterator<?> it = iterator();
            while (it.hasNext()) {
                if (i == n) {
                    // grow by ~50%, overflow-consciously capped at MAX_ARRAY_SIZE
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = it.next();
            }
            // trim if concurrent removals made the estimate too large
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        @SuppressWarnings("unchecked") public final <T> T[] toArray(T[] a) {
            long sz = map.mappingCount();
            if (sz > (long)(MAX_ARRAY_SIZE))
                throw new OutOfMemoryError(oomeMsg);
            int m = (int)sz;
            // reuse the caller's array when it is big enough, per the
            // Collection.toArray(T[]) contract
            T[] r = (a.length >= m) ? a :
                (T[])java.lang.reflect.Array
                .newInstance(a.getClass().getComponentType(), m);
            int n = r.length;
            int i = 0;
            Iterator<?> it = iterator();
            while (it.hasNext()) {
                if (i == n) {
                    // same overflow-conscious ~50% growth as toArray()
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = (T)it.next();
            }
            if (a == r && i < n) {
                r[i] = null; // null-terminate
                return r;
            }
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        public final int hashCode() {
            // sum of element hash codes, as specified for Set views
            int h = 0;
            for (Iterator<?> it = iterator(); it.hasNext();)
                h += it.next().hashCode();
            return h;
        }

        public final String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append('[');
            Iterator<?> it = iterator();
            if (it.hasNext()) {
                for (;;) {
                    Object e = it.next();
                    // guard against self-containing collections
                    sb.append(e == this ? "(this Collection)" : e);
                    if (!it.hasNext())
                        break;
                    sb.append(',').append(' ');
                }
            }
            return sb.append(']').toString();
        }

        public final boolean containsAll(Collection<?> c) {
            if (c != this) {
                for (Iterator<?> it = c.iterator(); it.hasNext();) {
                    Object e = it.next();
                    // this map is null-hostile, so a null element can
                    // never be contained
                    if (e == null || !contains(e))
                        return false;
                }
            }
            return true;
        }

        public final boolean removeAll(Collection<?> c) {
            boolean modified = false;
            for (Iterator<?> it = iterator(); it.hasNext();) {
                if (c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

        public final boolean retainAll(Collection<?> c) {
            boolean modified = false;
            for (Iterator<?> it = iterator(); it.hasNext();) {
                if (!c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

    }
3492    
3493     static final class Values<K,V> extends CHMView<K,V>
3494     implements Collection<V> {
3495     Values(ConcurrentHashMap<K, V> map) { super(map); }
3496     public final boolean contains(Object o) { return map.containsValue(o); }
3497     public final boolean remove(Object o) {
3498     if (o != null) {
3499     Iterator<V> it = new ValueIterator<K,V>(map);
3500     while (it.hasNext()) {
3501     if (o.equals(it.next())) {
3502     it.remove();
3503     return true;
3504     }
3505     }
3506     }
3507     return false;
3508     }
3509     public final Iterator<V> iterator() {
3510     return new ValueIterator<K,V>(map);
3511     }
3512     public final boolean add(V e) {
3513     throw new UnsupportedOperationException();
3514     }
3515     public final boolean addAll(Collection<? extends V> c) {
3516     throw new UnsupportedOperationException();
3517     }
3518    
3519     }
3520    
3521     static final class EntrySet<K,V> extends CHMView<K,V>
3522     implements Set<Map.Entry<K,V>> {
3523     EntrySet(ConcurrentHashMap<K, V> map) { super(map); }
3524     public final boolean contains(Object o) {
3525     Object k, v, r; Map.Entry<?,?> e;
3526     return ((o instanceof Map.Entry) &&
3527     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3528     (r = map.get(k)) != null &&
3529     (v = e.getValue()) != null &&
3530     (v == r || v.equals(r)));
3531     }
3532     public final boolean remove(Object o) {
3533     Object k, v; Map.Entry<?,?> e;
3534     return ((o instanceof Map.Entry) &&
3535     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3536     (v = e.getValue()) != null &&
3537     map.remove(k, v));
3538     }
3539     public final Iterator<Map.Entry<K,V>> iterator() {
3540     return new EntryIterator<K,V>(map);
3541     }
3542     public final boolean add(Entry<K,V> e) {
3543     throw new UnsupportedOperationException();
3544     }
3545     public final boolean addAll(Collection<? extends Entry<K,V>> c) {
3546     throw new UnsupportedOperationException();
3547     }
3548     public boolean equals(Object o) {
3549     Set<?> c;
3550     return ((o instanceof Set) &&
3551     ((c = (Set<?>)o) == this ||
3552     (containsAll(c) && c.containsAll(this))));
3553     }
3554     }
3555    
    /* ---------------- Serialization Support -------------- */

    /**
     * Stripped-down version of helper class used in previous version,
     * declared for the sake of serialization compatibility
     */
    static class Segment<K,V> implements Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        // retained solely so the serialized form matches earlier releases;
        // the field name is part of the serial form and must not change
        final float loadFactor;
        Segment(float lf) { this.loadFactor = lf; }
    }
3567    
    /**
     * Saves the state of the {@code ConcurrentHashMap} instance to a
     * stream (i.e., serializes it).
     * @param s the stream
     * @serialData
     * the key (Object) and value (Object)
     * for each key-value mapping, followed by a null pair.
     * The key-value mappings are emitted in no particular order.
     */
    @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        // Recreate the segment array of previous releases so the default
        // serialized form stays interchangeable with older versions.
        if (segments == null) { // for serialization compatibility
            segments = (Segment<K,V>[])
                new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
            for (int i = 0; i < segments.length; ++i)
                segments[i] = new Segment<K,V>(LOAD_FACTOR);
        }
        s.defaultWriteObject();
        Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
        Object v;
        while ((v = it.advance()) != null) {
            s.writeObject(it.nextKey);
            s.writeObject(v);
        }
        s.writeObject(null);   // a null key/value pair terminates the mappings
        s.writeObject(null);
        segments = null; // throw away
    }
3596    
    /**
     * Reconstitutes the instance from a stream (that is, deserializes it).
     * @param s the stream
     */
    @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        s.defaultReadObject();
        this.segments = null; // unneeded
        // initialize transient final field
        UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder());

        // Create all nodes, then place in table once size is known
        long size = 0L;
        Node p = null;   // head of a temporary linked list of all nodes
        for (;;) {
            K k = (K) s.readObject();
            V v = (V) s.readObject();
            if (k != null && v != null) {
                int h = spread(k.hashCode());
                p = new Node(h, k, v, p);   // prepend to reconstruction list
                ++size;
            }
            else
                break;   // the null/null pair written by writeObject ends input
        }
        if (p != null) {
            boolean init = false;
            int n;
            // choose a power-of-two table size roughly 1.5x the element count
            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
                n = MAXIMUM_CAPACITY;
            else {
                int sz = (int)size;
                n = tableSizeFor(sz + (sz >>> 1) + 1);
            }
            int sc = sizeCtl;
            boolean collide = false;   // set if any bin gets equal-hash nodes
            // sizeCtl CAS to -1 claims table initialization, as in initTable
            if (n > sc &&
                UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
                try {
                    if (table == null) {
                        init = true;
                        Node[] tab = new Node[n];
                        int mask = n - 1;
                        while (p != null) {
                            int j = p.hash & mask;
                            Node next = p.next;
                            // push p onto the head of bin j
                            Node q = p.next = tabAt(tab, j);
                            setTabAt(tab, j, p);
                            if (!collide && q != null && q.hash == p.hash)
                                collide = true;
                            p = next;
                        }
                        table = tab;
                        counter.add(size);
                        sc = n - (n >>> 2);   // next resize threshold (0.75 * n)
                    }
                } finally {
                    sizeCtl = sc;   // release the initialization claim
                }
                if (collide) { // rescan and convert to TreeBins
                    Node[] tab = table;
                    for (int i = 0; i < tab.length; ++i) {
                        int c = 0;
                        for (Node e = tabAt(tab, i); e != null; e = e.next) {
                            // treeify long bins of Comparable keys
                            if (++c > TREE_THRESHOLD &&
                                (e.key instanceof Comparable)) {
                                replaceWithTreeBin(tab, i, e.key);
                                break;
                            }
                        }
                    }
                }
            }
            if (!init) { // Can only happen if unsafely published.
                while (p != null) {
                    internalPut(p.key, p.val);
                    p = p.next;
                }
            }
        }
    }
3678    
3679    
3680     // -------------------------------------------------------
3681    
    // Sams: single-abstract-method interfaces used by the parallel bulk
    // operations below (pre-java.util.function functional interfaces)
    /** Interface describing a void action of one argument */
    public interface Action<A> { void apply(A a); }
    /** Interface describing a void action of two arguments */
    public interface BiAction<A,B> { void apply(A a, B b); }
    /** Interface describing a function of one argument */
    public interface Fun<A,T> { T apply(A a); }
    /** Interface describing a function of two arguments */
    public interface BiFun<A,B,T> { T apply(A a, B b); }
    /** Interface describing a function of no arguments */
    public interface Generator<T> { T apply(); }
    /** Interface describing a function mapping its argument to a double */
    public interface ObjectToDouble<A> { double apply(A a); }
    /** Interface describing a function mapping its argument to a long */
    public interface ObjectToLong<A> { long apply(A a); }
    /** Interface describing a function mapping its argument to an int */
    public interface ObjectToInt<A> {int apply(A a); }
    /** Interface describing a function mapping two arguments to a double */
    public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
    /** Interface describing a function mapping two arguments to a long */
    public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
    /** Interface describing a function mapping two arguments to an int */
    public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); }
    /** Interface describing a function mapping a double to a double */
    public interface DoubleToDouble { double apply(double a); }
    /** Interface describing a function mapping a long to a long */
    public interface LongToLong { long apply(long a); }
    /** Interface describing a function mapping an int to an int */
    public interface IntToInt { int apply(int a); }
    /** Interface describing a function mapping two doubles to a double */
    public interface DoubleByDoubleToDouble { double apply(double a, double b); }
    /** Interface describing a function mapping two longs to a long */
    public interface LongByLongToLong { long apply(long a, long b); }
    /** Interface describing a function mapping two ints to an int */
    public interface IntByIntToInt { int apply(int a, int b); }
3717    
3718    
3719     // -------------------------------------------------------
3720    
    /**
     * Performs the given action for each (key, value).
     *
     * @param action the action
     */
    public void forEach(BiAction<K,V> action) {
        ForkJoinTasks.forEach
            (this, action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each (key, value).
     *
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied).
     * @param action the action
     */
    public <U> void forEach(BiFun<? super K, ? super V, ? extends U> transformer,
                            Action<U> action) {
        ForkJoinTasks.forEach
            (this, transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each (key, value), or null if none. Upon
     * success, further element processing is suppressed and the
     * results of any other parallel invocations of the search
     * function are ignored.
     *
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @return a non-null result from applying the given search
     * function on each (key, value), or null if none
     */
    public <U> U search(BiFun<? super K, ? super V, ? extends U> searchFunction) {
        return ForkJoinTasks.search
            (this, searchFunction).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, or null if none.
     *
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined).
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     */
    public <U> U reduce(BiFun<? super K, ? super V, ? extends U> transformer,
                        BiFun<? super U, ? super U, ? extends U> reducer) {
        return ForkJoinTasks.reduce
            (this, transformer, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, and the given basis as an identity value.
     *
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     */
    public double reduceToDouble(ObjectByObjectToDouble<? super K, ? super V> transformer,
                                 double basis,
                                 DoubleByDoubleToDouble reducer) {
        return ForkJoinTasks.reduceToDouble
            (this, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, and the given basis as an identity value.
     *
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     */
    public long reduceToLong(ObjectByObjectToLong<? super K, ? super V> transformer,
                             long basis,
                             LongByLongToLong reducer) {
        return ForkJoinTasks.reduceToLong
            (this, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, and the given basis as an identity value.
     *
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     */
    public int reduceToInt(ObjectByObjectToInt<? super K, ? super V> transformer,
                           int basis,
                           IntByIntToInt reducer) {
        return ForkJoinTasks.reduceToInt
            (this, transformer, basis, reducer).invoke();
    }
3837    
    /**
     * Performs the given action for each key.
     *
     * @param action the action
     */
    public void forEachKey(Action<K> action) {
        ForkJoinTasks.forEachKey
            (this, action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each key.
     *
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied).
     * @param action the action
     */
    public <U> void forEachKey(Fun<? super K, ? extends U> transformer,
                               Action<U> action) {
        ForkJoinTasks.forEachKey
            (this, transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each key, or null if none. Upon success,
     * further element processing is suppressed and the results of
     * any other parallel invocations of the search function are
     * ignored.
     *
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @return a non-null result from applying the given search
     * function on each key, or null if none
     */
    public <U> U searchKeys(Fun<? super K, ? extends U> searchFunction) {
        return ForkJoinTasks.searchKeys
            (this, searchFunction).invoke();
    }

    /**
     * Returns the result of accumulating all keys using the given
     * reducer to combine values, or null if none.
     *
     * @param reducer a commutative associative combining function
     * @return the result of accumulating all keys using the given
     * reducer to combine values, or null if none
     */
    public K reduceKeys(BiFun<? super K, ? super K, ? extends K> reducer) {
        return ForkJoinTasks.reduceKeys
            (this, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, or
     * null if none.
     *
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined).
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all keys
     */
    public <U> U reduceKeys(Fun<? super K, ? extends U> transformer,
                            BiFun<? super U, ? super U, ? extends U> reducer) {
        return ForkJoinTasks.reduceKeys
            (this, transformer, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, and
     * the given basis as an identity value.
     *
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all keys
     */
    public double reduceKeysToDouble(ObjectToDouble<? super K> transformer,
                                     double basis,
                                     DoubleByDoubleToDouble reducer) {
        return ForkJoinTasks.reduceKeysToDouble
            (this, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, and
     * the given basis as an identity value.
     *
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all keys
     */
    public long reduceKeysToLong(ObjectToLong<? super K> transformer,
                                 long basis,
                                 LongByLongToLong reducer) {
        return ForkJoinTasks.reduceKeysToLong
            (this, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, and
     * the given basis as an identity value.
     *
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all keys
     */
    public int reduceKeysToInt(ObjectToInt<? super K> transformer,
                               int basis,
                               IntByIntToInt reducer) {
        return ForkJoinTasks.reduceKeysToInt
            (this, transformer, basis, reducer).invoke();
    }
3967 dl 1.119
    /**
     * Performs the given action for each value.
     *
     * @param action the action
     */
    public void forEachValue(Action<V> action) {
        ForkJoinTasks.forEachValue
            (this, action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each value.
     *
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied).
     * @param action the action
     */
    public <U> void forEachValue(Fun<? super V, ? extends U> transformer,
                                 Action<U> action) {
        ForkJoinTasks.forEachValue
            (this, transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each value, or null if none. Upon success,
     * further element processing is suppressed and the results of
     * any other parallel invocations of the search function are
     * ignored.
     *
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @return a non-null result from applying the given search
     * function on each value, or null if none
     */
    public <U> U searchValues(Fun<? super V, ? extends U> searchFunction) {
        return ForkJoinTasks.searchValues
            (this, searchFunction).invoke();
    }

    /**
     * Returns the result of accumulating all values using the
     * given reducer to combine values, or null if none.
     *
     * @param reducer a commutative associative combining function
     * @return the result of accumulating all values
     */
    public V reduceValues(BiFun<? super V, ? super V, ? extends V> reducer) {
        return ForkJoinTasks.reduceValues
            (this, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values, or
     * null if none.
     *
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined).
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all values
     */
    public <U> U reduceValues(Fun<? super V, ? extends U> transformer,
                              BiFun<? super U, ? super U, ? extends U> reducer) {
        return ForkJoinTasks.reduceValues
            (this, transformer, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all values
     */
    public double reduceValuesToDouble(ObjectToDouble<? super V> transformer,
                                       double basis,
                                       DoubleByDoubleToDouble reducer) {
        return ForkJoinTasks.reduceValuesToDouble
            (this, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all values
     */
    public long reduceValuesToLong(ObjectToLong<? super V> transformer,
                                   long basis,
                                   LongByLongToLong reducer) {
        return ForkJoinTasks.reduceValuesToLong
            (this, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all values
     */
    public int reduceValuesToInt(ObjectToInt<? super V> transformer,
                                 int basis,
                                 IntByIntToInt reducer) {
        return ForkJoinTasks.reduceValuesToInt
            (this, transformer, basis, reducer).invoke();
    }
4096 dl 1.119
4097 dl 1.137 /**
4098     * Performs the given action for each entry.
4099     *
4100     * @param action the action
4101     */
4102     public void forEachEntry(Action<Map.Entry<K,V>> action) {
4103     ForkJoinTasks.forEachEntry
4104     (this, action).invoke();
4105     }
4106 dl 1.119
4107 dl 1.137 /**
4108     * Performs the given action for each non-null transformation
4109     * of each entry.
4110     *
4111     * @param transformer a function returning the transformation
4112     * for an element, or null of there is no transformation (in
4113     * which case the action is not applied).
4114     * @param action the action
4115     */
4116     public <U> void forEachEntry(Fun<Map.Entry<K,V>, ? extends U> transformer,
4117     Action<U> action) {
4118     ForkJoinTasks.forEachEntry
4119     (this, transformer, action).invoke();
4120     }
4121 dl 1.119
4122 dl 1.137 /**
4123     * Returns a non-null result from applying the given search
4124     * function on each entry, or null if none. Upon success,
4125     * further element processing is suppressed and the results of
4126     * any other parallel invocations of the search function are
4127     * ignored.
4128     *
4129     * @param searchFunction a function returning a non-null
4130     * result on success, else null
4131     * @return a non-null result from applying the given search
4132     * function on each entry, or null if none
4133     */
4134     public <U> U searchEntries(Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
4135     return ForkJoinTasks.searchEntries
4136     (this, searchFunction).invoke();
4137     }
4138 dl 1.119
4139 dl 1.137 /**
4140     * Returns the result of accumulating all entries using the
4141     * given reducer to combine values, or null if none.
4142     *
4143     * @param reducer a commutative associative combining function
4144     * @return the result of accumulating all entries
4145     */
4146     public Map.Entry<K,V> reduceEntries(BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4147     return ForkJoinTasks.reduceEntries
4148     (this, reducer).invoke();
4149     }
4150 dl 1.119
    /**
     * Returns the result of accumulating the given transformation
     * of all entries using the given reducer to combine values,
     * or null if none.
     *
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined).
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all entries
     */
    public <U> U reduceEntries(Fun<Map.Entry<K,V>, ? extends U> transformer,
                               BiFun<? super U, ? super U, ? extends U> reducer) {
        return ForkJoinTasks.reduceEntries
            (this, transformer, reducer).invoke();
    }
4168 dl 1.119
4169 dl 1.137 /**
4170     * Returns the result of accumulating the given transformation
4171     * of all entries using the given reducer to combine values,
4172     * and the given basis as an identity value.
4173     *
4174     * @param transformer a function returning the transformation
4175     * for an element
4176     * @param basis the identity (initial default value) for the reduction
4177     * @param reducer a commutative associative combining function
4178     * @return the result of accumulating the given transformation
4179     * of all entries
4180     */
4181     public double reduceEntriesToDouble(ObjectToDouble<Map.Entry<K,V>> transformer,
4182     double basis,
4183     DoubleByDoubleToDouble reducer) {
4184     return ForkJoinTasks.reduceEntriesToDouble
4185     (this, transformer, basis, reducer).invoke();
4186     }
4187 dl 1.119
4188 dl 1.137 /**
4189     * Returns the result of accumulating the given transformation
4190     * of all entries using the given reducer to combine values,
4191     * and the given basis as an identity value.
4192     *
4193     * @param transformer a function returning the transformation
4194     * for an element
4195     * @param basis the identity (initial default value) for the reduction
4196     * @param reducer a commutative associative combining function
4197     * @return the result of accumulating the given transformation
4198     * of all entries
4199     */
4200     public long reduceEntriesToLong(ObjectToLong<Map.Entry<K,V>> transformer,
4201     long basis,
4202     LongByLongToLong reducer) {
4203     return ForkJoinTasks.reduceEntriesToLong
4204     (this, transformer, basis, reducer).invoke();
4205     }
4206 dl 1.119
4207 dl 1.137 /**
4208     * Returns the result of accumulating the given transformation
4209     * of all entries using the given reducer to combine values,
4210     * and the given basis as an identity value.
4211     *
4212     * @param transformer a function returning the transformation
4213     * for an element
4214     * @param basis the identity (initial default value) for the reduction
4215     * @param reducer a commutative associative combining function
4216     * @return the result of accumulating the given transformation
4217     * of all entries
4218     */
4219     public int reduceEntriesToInt(ObjectToInt<Map.Entry<K,V>> transformer,
4220     int basis,
4221     IntByIntToInt reducer) {
4222     return ForkJoinTasks.reduceEntriesToInt
4223     (this, transformer, basis, reducer).invoke();
4224 dl 1.119 }
4225    
4226     // ---------------------------------------------------------------------
4227    
    /**
     * Predefined tasks for performing bulk parallel operations on
     * ConcurrentHashMaps. These tasks follow the forms and rules used
     * for bulk operations. Each method has the same name, but returns
     * a task rather than invoking it. These methods may be useful in
     * custom applications such as submitting a task without waiting
     * for completion, using a custom pool, or combining with other
     * tasks.
     */
    public static class ForkJoinTasks {
        private ForkJoinTasks() {} // non-instantiable; static factory methods only

        /**
         * Returns a task that when invoked, performs the given
         * action for each (key, value).
         *
         * @param map the map
         * @param action the action
         * @return the task
         */
        public static <K,V> ForkJoinTask<Void> forEach
            (ConcurrentHashMap<K,V> map,
             BiAction<K,V> action) {
            if (action == null) throw new NullPointerException();
            return new ForEachMappingTask<K,V>(map, null, -1, null, action);
        }

        /**
         * Returns a task that when invoked, performs the given
         * action for each non-null transformation of each (key, value).
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element, or null if there is no transformation (in
         * which case the action is not applied)
         * @param action the action
         * @return the task
         */
        public static <K,V,U> ForkJoinTask<Void> forEach
            (ConcurrentHashMap<K,V> map,
             BiFun<? super K, ? super V, ? extends U> transformer,
             Action<U> action) {
            if (transformer == null || action == null)
                throw new NullPointerException();
            return new ForEachTransformedMappingTask<K,V,U>
                (map, null, -1, null, transformer, action);
        }

        /**
         * Returns a task that when invoked, returns a non-null result
         * from applying the given search function on each (key,
         * value), or null if none. Upon success, further element
         * processing is suppressed and the results of any other
         * parallel invocations of the search function are ignored.
         *
         * @param map the map
         * @param searchFunction a function returning a non-null
         * result on success, else null
         * @return the task
         */
        public static <K,V,U> ForkJoinTask<U> search
            (ConcurrentHashMap<K,V> map,
             BiFun<? super K, ? super V, ? extends U> searchFunction) {
            if (searchFunction == null) throw new NullPointerException();
            return new SearchMappingsTask<K,V,U>
                (map, null, -1, null, searchFunction,
                 new AtomicReference<U>());
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating the given transformation of all (key, value) pairs
         * using the given reducer to combine values, or null if none.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element, or null if there is no transformation (in
         * which case it is not combined).
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V,U> ForkJoinTask<U> reduce
            (ConcurrentHashMap<K,V> map,
             BiFun<? super K, ? super V, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            if (transformer == null || reducer == null)
                throw new NullPointerException();
            return new MapReduceMappingsTask<K,V,U>
                (map, null, -1, null, transformer, reducer);
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating the given transformation of all (key, value) pairs
         * using the given reducer to combine values, and the given
         * basis as an identity value.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element
         * @param basis the identity (initial default value) for the reduction
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V> ForkJoinTask<Double> reduceToDouble
            (ConcurrentHashMap<K,V> map,
             ObjectByObjectToDouble<? super K, ? super V> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            if (transformer == null || reducer == null)
                throw new NullPointerException();
            return new MapReduceMappingsToDoubleTask<K,V>
                (map, null, -1, null, transformer, basis, reducer);
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating the given transformation of all (key, value) pairs
         * using the given reducer to combine values, and the given
         * basis as an identity value.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element
         * @param basis the identity (initial default value) for the reduction
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V> ForkJoinTask<Long> reduceToLong
            (ConcurrentHashMap<K,V> map,
             ObjectByObjectToLong<? super K, ? super V> transformer,
             long basis,
             LongByLongToLong reducer) {
            if (transformer == null || reducer == null)
                throw new NullPointerException();
            return new MapReduceMappingsToLongTask<K,V>
                (map, null, -1, null, transformer, basis, reducer);
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating the given transformation of all (key, value) pairs
         * using the given reducer to combine values, and the given
         * basis as an identity value.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element
         * @param basis the identity (initial default value) for the reduction
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V> ForkJoinTask<Integer> reduceToInt
            (ConcurrentHashMap<K,V> map,
             ObjectByObjectToInt<? super K, ? super V> transformer,
             int basis,
             IntByIntToInt reducer) {
            if (transformer == null || reducer == null)
                throw new NullPointerException();
            return new MapReduceMappingsToIntTask<K,V>
                (map, null, -1, null, transformer, basis, reducer);
        }

        /**
         * Returns a task that when invoked, performs the given action
         * for each key.
         *
         * @param map the map
         * @param action the action
         * @return the task
         */
        public static <K,V> ForkJoinTask<Void> forEachKey
            (ConcurrentHashMap<K,V> map,
             Action<K> action) {
            if (action == null) throw new NullPointerException();
            return new ForEachKeyTask<K,V>(map, null, -1, null, action);
        }

        /**
         * Returns a task that when invoked, performs the given action
         * for each non-null transformation of each key.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element, or null if there is no transformation (in
         * which case the action is not applied)
         * @param action the action
         * @return the task
         */
        public static <K,V,U> ForkJoinTask<Void> forEachKey
            (ConcurrentHashMap<K,V> map,
             Fun<? super K, ? extends U> transformer,
             Action<U> action) {
            if (transformer == null || action == null)
                throw new NullPointerException();
            return new ForEachTransformedKeyTask<K,V,U>
                (map, null, -1, null, transformer, action);
        }

        /**
         * Returns a task that when invoked, returns a non-null result
         * from applying the given search function on each key, or
         * null if none. Upon success, further element processing is
         * suppressed and the results of any other parallel
         * invocations of the search function are ignored.
         *
         * @param map the map
         * @param searchFunction a function returning a non-null
         * result on success, else null
         * @return the task
         */
        public static <K,V,U> ForkJoinTask<U> searchKeys
            (ConcurrentHashMap<K,V> map,
             Fun<? super K, ? extends U> searchFunction) {
            if (searchFunction == null) throw new NullPointerException();
            return new SearchKeysTask<K,V,U>
                (map, null, -1, null, searchFunction,
                 new AtomicReference<U>());
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating all keys using the given reducer to combine
         * values, or null if none.
         *
         * @param map the map
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V> ForkJoinTask<K> reduceKeys
            (ConcurrentHashMap<K,V> map,
             BiFun<? super K, ? super K, ? extends K> reducer) {
            if (reducer == null) throw new NullPointerException();
            return new ReduceKeysTask<K,V>
                (map, null, -1, null, reducer);
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating the given transformation of all keys using the given
         * reducer to combine values, or null if none.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element, or null if there is no transformation (in
         * which case it is not combined).
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V,U> ForkJoinTask<U> reduceKeys
            (ConcurrentHashMap<K,V> map,
             Fun<? super K, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            if (transformer == null || reducer == null)
                throw new NullPointerException();
            return new MapReduceKeysTask<K,V,U>
                (map, null, -1, null, transformer, reducer);
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating the given transformation of all keys using the given
         * reducer to combine values, and the given basis as an
         * identity value.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element
         * @param basis the identity (initial default value) for the reduction
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V> ForkJoinTask<Double> reduceKeysToDouble
            (ConcurrentHashMap<K,V> map,
             ObjectToDouble<? super K> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            if (transformer == null || reducer == null)
                throw new NullPointerException();
            return new MapReduceKeysToDoubleTask<K,V>
                (map, null, -1, null, transformer, basis, reducer);
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating the given transformation of all keys using the given
         * reducer to combine values, and the given basis as an
         * identity value.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element
         * @param basis the identity (initial default value) for the reduction
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V> ForkJoinTask<Long> reduceKeysToLong
            (ConcurrentHashMap<K,V> map,
             ObjectToLong<? super K> transformer,
             long basis,
             LongByLongToLong reducer) {
            if (transformer == null || reducer == null)
                throw new NullPointerException();
            return new MapReduceKeysToLongTask<K,V>
                (map, null, -1, null, transformer, basis, reducer);
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating the given transformation of all keys using the given
         * reducer to combine values, and the given basis as an
         * identity value.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element
         * @param basis the identity (initial default value) for the reduction
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V> ForkJoinTask<Integer> reduceKeysToInt
            (ConcurrentHashMap<K,V> map,
             ObjectToInt<? super K> transformer,
             int basis,
             IntByIntToInt reducer) {
            if (transformer == null || reducer == null)
                throw new NullPointerException();
            return new MapReduceKeysToIntTask<K,V>
                (map, null, -1, null, transformer, basis, reducer);
        }

        /**
         * Returns a task that when invoked, performs the given action
         * for each value.
         *
         * @param map the map
         * @param action the action
         * @return the task
         */
        public static <K,V> ForkJoinTask<Void> forEachValue
            (ConcurrentHashMap<K,V> map,
             Action<V> action) {
            if (action == null) throw new NullPointerException();
            return new ForEachValueTask<K,V>(map, null, -1, null, action);
        }

        /**
         * Returns a task that when invoked, performs the given action
         * for each non-null transformation of each value.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element, or null if there is no transformation (in
         * which case the action is not applied)
         * @param action the action
         * @return the task
         */
        public static <K,V,U> ForkJoinTask<Void> forEachValue
            (ConcurrentHashMap<K,V> map,
             Fun<? super V, ? extends U> transformer,
             Action<U> action) {
            if (transformer == null || action == null)
                throw new NullPointerException();
            return new ForEachTransformedValueTask<K,V,U>
                (map, null, -1, null, transformer, action);
        }

        /**
         * Returns a task that when invoked, returns a non-null result
         * from applying the given search function on each value, or
         * null if none. Upon success, further element processing is
         * suppressed and the results of any other parallel
         * invocations of the search function are ignored.
         *
         * @param map the map
         * @param searchFunction a function returning a non-null
         * result on success, else null
         * @return the task
         */
        public static <K,V,U> ForkJoinTask<U> searchValues
            (ConcurrentHashMap<K,V> map,
             Fun<? super V, ? extends U> searchFunction) {
            if (searchFunction == null) throw new NullPointerException();
            return new SearchValuesTask<K,V,U>
                (map, null, -1, null, searchFunction,
                 new AtomicReference<U>());
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating all values using the given reducer to combine
         * values, or null if none.
         *
         * @param map the map
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V> ForkJoinTask<V> reduceValues
            (ConcurrentHashMap<K,V> map,
             BiFun<? super V, ? super V, ? extends V> reducer) {
            if (reducer == null) throw new NullPointerException();
            return new ReduceValuesTask<K,V>
                (map, null, -1, null, reducer);
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating the given transformation of all values using the
         * given reducer to combine values, or null if none.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element, or null if there is no transformation (in
         * which case it is not combined).
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V,U> ForkJoinTask<U> reduceValues
            (ConcurrentHashMap<K,V> map,
             Fun<? super V, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            if (transformer == null || reducer == null)
                throw new NullPointerException();
            return new MapReduceValuesTask<K,V,U>
                (map, null, -1, null, transformer, reducer);
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating the given transformation of all values using the
         * given reducer to combine values, and the given basis as an
         * identity value.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element
         * @param basis the identity (initial default value) for the reduction
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V> ForkJoinTask<Double> reduceValuesToDouble
            (ConcurrentHashMap<K,V> map,
             ObjectToDouble<? super V> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            if (transformer == null || reducer == null)
                throw new NullPointerException();
            return new MapReduceValuesToDoubleTask<K,V>
                (map, null, -1, null, transformer, basis, reducer);
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating the given transformation of all values using the
         * given reducer to combine values, and the given basis as an
         * identity value.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element
         * @param basis the identity (initial default value) for the reduction
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V> ForkJoinTask<Long> reduceValuesToLong
            (ConcurrentHashMap<K,V> map,
             ObjectToLong<? super V> transformer,
             long basis,
             LongByLongToLong reducer) {
            if (transformer == null || reducer == null)
                throw new NullPointerException();
            return new MapReduceValuesToLongTask<K,V>
                (map, null, -1, null, transformer, basis, reducer);
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating the given transformation of all values using the
         * given reducer to combine values, and the given basis as an
         * identity value.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element
         * @param basis the identity (initial default value) for the reduction
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V> ForkJoinTask<Integer> reduceValuesToInt
            (ConcurrentHashMap<K,V> map,
             ObjectToInt<? super V> transformer,
             int basis,
             IntByIntToInt reducer) {
            if (transformer == null || reducer == null)
                throw new NullPointerException();
            return new MapReduceValuesToIntTask<K,V>
                (map, null, -1, null, transformer, basis, reducer);
        }

        /**
         * Returns a task that when invoked, performs the given action
         * for each entry.
         *
         * @param map the map
         * @param action the action
         * @return the task
         */
        public static <K,V> ForkJoinTask<Void> forEachEntry
            (ConcurrentHashMap<K,V> map,
             Action<Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            return new ForEachEntryTask<K,V>(map, null, -1, null, action);
        }

        /**
         * Returns a task that when invoked, performs the given action
         * for each non-null transformation of each entry.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element, or null if there is no transformation (in
         * which case the action is not applied)
         * @param action the action
         * @return the task
         */
        public static <K,V,U> ForkJoinTask<Void> forEachEntry
            (ConcurrentHashMap<K,V> map,
             Fun<Map.Entry<K,V>, ? extends U> transformer,
             Action<U> action) {
            if (transformer == null || action == null)
                throw new NullPointerException();
            return new ForEachTransformedEntryTask<K,V,U>
                (map, null, -1, null, transformer, action);
        }

        /**
         * Returns a task that when invoked, returns a non-null result
         * from applying the given search function on each entry, or
         * null if none. Upon success, further element processing is
         * suppressed and the results of any other parallel
         * invocations of the search function are ignored.
         *
         * @param map the map
         * @param searchFunction a function returning a non-null
         * result on success, else null
         * @return the task
         */
        public static <K,V,U> ForkJoinTask<U> searchEntries
            (ConcurrentHashMap<K,V> map,
             Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
            if (searchFunction == null) throw new NullPointerException();
            return new SearchEntriesTask<K,V,U>
                (map, null, -1, null, searchFunction,
                 new AtomicReference<U>());
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating all entries using the given reducer to combine
         * values, or null if none.
         *
         * @param map the map
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V> ForkJoinTask<Map.Entry<K,V>> reduceEntries
            (ConcurrentHashMap<K,V> map,
             BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
            if (reducer == null) throw new NullPointerException();
            return new ReduceEntriesTask<K,V>
                (map, null, -1, null, reducer);
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating the given transformation of all entries using the
         * given reducer to combine values, or null if none.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element, or null if there is no transformation (in
         * which case it is not combined).
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V,U> ForkJoinTask<U> reduceEntries
            (ConcurrentHashMap<K,V> map,
             Fun<Map.Entry<K,V>, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            if (transformer == null || reducer == null)
                throw new NullPointerException();
            return new MapReduceEntriesTask<K,V,U>
                (map, null, -1, null, transformer, reducer);
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating the given transformation of all entries using the
         * given reducer to combine values, and the given basis as an
         * identity value.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element
         * @param basis the identity (initial default value) for the reduction
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V> ForkJoinTask<Double> reduceEntriesToDouble
            (ConcurrentHashMap<K,V> map,
             ObjectToDouble<Map.Entry<K,V>> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            if (transformer == null || reducer == null)
                throw new NullPointerException();
            return new MapReduceEntriesToDoubleTask<K,V>
                (map, null, -1, null, transformer, basis, reducer);
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating the given transformation of all entries using the
         * given reducer to combine values, and the given basis as an
         * identity value.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element
         * @param basis the identity (initial default value) for the reduction
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V> ForkJoinTask<Long> reduceEntriesToLong
            (ConcurrentHashMap<K,V> map,
             ObjectToLong<Map.Entry<K,V>> transformer,
             long basis,
             LongByLongToLong reducer) {
            if (transformer == null || reducer == null)
                throw new NullPointerException();
            return new MapReduceEntriesToLongTask<K,V>
                (map, null, -1, null, transformer, basis, reducer);
        }

        /**
         * Returns a task that when invoked, returns the result of
         * accumulating the given transformation of all entries using the
         * given reducer to combine values, and the given basis as an
         * identity value.
         *
         * @param map the map
         * @param transformer a function returning the transformation
         * for an element
         * @param basis the identity (initial default value) for the reduction
         * @param reducer a commutative associative combining function
         * @return the task
         */
        public static <K,V> ForkJoinTask<Integer> reduceEntriesToInt
            (ConcurrentHashMap<K,V> map,
             ObjectToInt<Map.Entry<K,V>> transformer,
             int basis,
             IntByIntToInt reducer) {
            if (transformer == null || reducer == null)
                throw new NullPointerException();
            return new MapReduceEntriesToIntTask<K,V>
                (map, null, -1, null, transformer, basis, reducer);
        }
    }
4890    
4891     // -------------------------------------------------------
4892    
    /**
     * Base for FJ tasks for bulk operations. This adds a variant of
     * CountedCompleters and some split and merge bookkeeping to
     * iterator functionality. The forEach and reduce methods are
     * similar to those illustrated in CountedCompleter documentation,
     * except that bottom-up reduction completions perform them within
     * their compute methods. The search methods are like forEach
     * except they continually poll for success and exit early. Also,
     * exceptions are handled in a simpler manner, by just trying to
     * complete root task exceptionally.
     */
    @SuppressWarnings("serial") static abstract class BulkTask<K,V,R> extends Traverser<K,V,R> {
        final BulkTask<K,V,?> parent;  // completion target; null for the root task
        int batch;                     // split control; -1 for unknown (computed lazily in batch())
        int pending;                   // completion control; CASed via casPending

        /**
         * Creates a task; when parent is non-null, splits the
         * parent's remaining traversal range in half, taking the
         * upper half for this task.
         */
        BulkTask(ConcurrentHashMap<K,V> map, BulkTask<K,V,?> parent,
                 int batch) {
            super(map);
            this.parent = parent;
            this.batch = batch;
            if (parent != null && map != null) { // split parent
                Node[] t;
                // Force parent's table snapshot if not yet initialized
                if ((t = parent.tab) == null &&
                    (t = parent.tab = map.table) != null)
                    parent.baseLimit = parent.baseSize = t.length;
                this.tab = t;
                this.baseSize = parent.baseSize;
                // This task covers [midpoint, hi); parent keeps [baseIndex, midpoint)
                int hi = this.baseLimit = parent.baseLimit;
                parent.baseLimit = this.index = this.baseIndex =
                    (hi + parent.baseIndex + 1) >>> 1;
            }
        }

        /**
         * Forces root task to complete.
         * @param ex if null, complete normally, else exceptionally
         * @return false to simplify use
         */
        final boolean tryCompleteComputation(Throwable ex) {
            // Walk up to the root before completing, so joiners of the
            // root observe the outcome regardless of which subtask failed.
            for (BulkTask<K,V,?> a = this;;) {
                BulkTask<K,V,?> p = a.parent;
                if (p == null) {
                    if (ex != null)
                        a.completeExceptionally(ex);
                    else
                        a.quietlyComplete();
                    return false;
                }
                a = p;
            }
        }

        /**
         * Version of tryCompleteComputation for function screening checks
         */
        final boolean abortOnNullFunction() {
            return tryCompleteComputation(new Error("Unexpected null function"));
        }

        // utilities

        /** CompareAndSet pending count */
        final boolean casPending(int cmp, int val) {
            return U.compareAndSwapInt(this, PENDING, cmp, val);
        }

        /**
         * Returns approx exp2 of the number of times (minus one) to
         * split task by two before executing leaf action. This value
         * is faster to compute and more convenient to use as a guide
         * to splitting than is the depth, since it is used while
         * dividing by two anyway.
         */
        final int batch() {
            ConcurrentHashMap<K, V> m; int b; Node[] t; ForkJoinPool pool;
            if ((b = batch) < 0 && (m = map) != null) { // force initialization
                if ((t = tab) == null && (t = tab = m.table) != null)
                    baseLimit = baseSize = t.length;
                if (t != null) {
                    long n = m.counter.sum();
                    // Use pool parallelism (or the common pool's, when
                    // running outside a pool) to bound the split count.
                    int par = ((pool = getPool()) == null) ?
                        ForkJoinPool.getCommonPoolParallelism() :
                        pool.getParallelism();
                    int sp = par << 3; // slack of 8
                    // Clamp to [0, sp]: no point splitting beyond element count
                    b = batch = (n <= 0L) ? 0 : (n < (long)sp) ? (int)n : sp;
                }
            }
            return b;
        }

        /**
         * Returns exportable snapshot entry; detached from the map so
         * later map changes are not reflected in the entry.
         */
        static <K,V> AbstractMap.SimpleEntry<K,V> entryFor(K k, V v) {
            return new AbstractMap.SimpleEntry<K,V>(k, v);
        }

        // Unsafe mechanics
        private static final sun.misc.Unsafe U;
        private static final long PENDING;  // field offset of "pending" for CAS
        static {
            try {
                U = sun.misc.Unsafe.getUnsafe();
                PENDING = U.objectFieldOffset
                    (BulkTask.class.getDeclaredField("pending"));
            } catch (Exception e) {
                throw new Error(e);
            }
        }
    }
5004    
    /**
     * Base class for non-reductive actions (forEach and search
     * tasks). Adds a {@code nextTask} link so that a completing task
     * can locate and help execute its forked siblings when it is not
     * itself running inside a ForkJoinPool.
     */
    @SuppressWarnings("serial") static abstract class BulkAction<K,V,R> extends BulkTask<K,V,R> {
        BulkAction<K,V,?> nextTask;  // singly-linked list of forked siblings
        BulkAction(ConcurrentHashMap<K,V> map, BulkTask<K,V,?> parent,
                   int batch, BulkAction<K,V,?> nextTask) {
            super(map, parent, batch);
            this.nextTask = nextTask;
        }

        /**
         * Try to complete task and upward parents. Upon hitting
         * non-completed parent, if a non-FJ task, try to help out the
         * computation.
         *
         * @param subtasks head of the list of subtasks forked by the
         * caller, or null if none
         */
        final void tryComplete(BulkAction<K,V,?> subtasks) {
            BulkTask<K,V,?> a = this, s = a;
            for (int c;;) {
                if ((c = a.pending) == 0) {
                    // No pending children: move completion upward; at
                    // the root, quietlyComplete releases joiners.
                    if ((a = (s = a).parent) == null) {
                        s.quietlyComplete();
                        break;
                    }
                }
                else if (a.casPending(c, c - 1)) {
                    // Decremented a non-zero pending count: some child
                    // is still running. If we are an external (non-FJ)
                    // caller, help by unforking and running subtasks
                    // until the root is done.
                    if (subtasks != null && !inForkJoinPool()) {
                        while ((s = a.parent) != null)
                            a = s;  // find root
                        while (!a.isDone()) {
                            BulkAction<K,V,?> next = subtasks.nextTask;
                            if (subtasks.tryUnfork())
                                subtasks.exec();
                            if ((subtasks = next) == null)
                                break;
                        }
                    }
                    break;
                }
            }
        }

    }
5048    
5049 dl 1.119 /*
5050     * Task classes. Coded in a regular but ugly format/style to
5051     * simplify checks that each variant differs in the right way from
5052     * others.
5053     */
5054    
    /** Applies the given action to each key of the map. */
    @SuppressWarnings("serial") static final class ForEachKeyTask<K,V>
        extends BulkAction<K,V,Void> {
        final Action<K> action;
        ForEachKeyTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             ForEachKeyTask<K,V> nextTask,
             Action<K> action) {
            super(m, p, b, nextTask);
            this.action = action;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final Action<K> action = this.action;
            if (action == null)
                return abortOnNullFunction();
            ForEachKeyTask<K,V> subtasks = null;  // list head of forked splits
            try {
                int b = batch(), c;
                // Split in half while the batch estimate warrants it and
                // elements remain; each fork bumps pending via CAS.
                while (b > 1 && baseIndex != baseLimit) {
                    do {} while (!casPending(c = pending, c+1));
                    (subtasks = new ForEachKeyTask<K,V>
                     (map, this, b >>>= 1, subtasks, action)).fork();
                }
                // Process this task's remaining range; advance() publishes
                // the current key in nextKey (see Traverser).
                while (advance() != null)
                    action.apply((K)nextKey);
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            tryComplete(subtasks);
            return false;
        }
    }
5086    
    /** Applies the given action to each value of the map. */
    @SuppressWarnings("serial") static final class ForEachValueTask<K,V>
        extends BulkAction<K,V,Void> {
        final Action<V> action;
        ForEachValueTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             ForEachValueTask<K,V> nextTask,
             Action<V> action) {
            super(m, p, b, nextTask);
            this.action = action;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final Action<V> action = this.action;
            if (action == null)
                return abortOnNullFunction();
            ForEachValueTask<K,V> subtasks = null;  // list head of forked splits
            try {
                int b = batch(), c;
                // Split in half while the batch estimate warrants it and
                // elements remain; each fork bumps pending via CAS.
                while (b > 1 && baseIndex != baseLimit) {
                    do {} while (!casPending(c = pending, c+1));
                    (subtasks = new ForEachValueTask<K,V>
                     (map, this, b >>>= 1, subtasks, action)).fork();
                }
                // advance() returns the next value, or null when exhausted.
                Object v;
                while ((v = advance()) != null)
                    action.apply((V)v);
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            tryComplete(subtasks);
            return false;
        }
    }
5119    
    /** Applies the given action to each entry (as a snapshot) of the map. */
    @SuppressWarnings("serial") static final class ForEachEntryTask<K,V>
        extends BulkAction<K,V,Void> {
        final Action<Entry<K,V>> action;
        ForEachEntryTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             ForEachEntryTask<K,V> nextTask,
             Action<Entry<K,V>> action) {
            super(m, p, b, nextTask);
            this.action = action;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final Action<Entry<K,V>> action = this.action;
            if (action == null)
                return abortOnNullFunction();
            ForEachEntryTask<K,V> subtasks = null;  // list head of forked splits
            try {
                int b = batch(), c;
                // Split in half while the batch estimate warrants it and
                // elements remain; each fork bumps pending via CAS.
                while (b > 1 && baseIndex != baseLimit) {
                    do {} while (!casPending(c = pending, c+1));
                    (subtasks = new ForEachEntryTask<K,V>
                     (map, this, b >>>= 1, subtasks, action)).fork();
                }
                // Each element is exported as a detached snapshot entry.
                Object v;
                while ((v = advance()) != null)
                    action.apply(entryFor((K)nextKey, (V)v));
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            tryComplete(subtasks);
            return false;
        }
    }
5152    
    /** Applies the given (key, value) action to each mapping of the map. */
    @SuppressWarnings("serial") static final class ForEachMappingTask<K,V>
        extends BulkAction<K,V,Void> {
        final BiAction<K,V> action;
        ForEachMappingTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             ForEachMappingTask<K,V> nextTask,
             BiAction<K,V> action) {
            super(m, p, b, nextTask);
            this.action = action;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final BiAction<K,V> action = this.action;
            if (action == null)
                return abortOnNullFunction();
            ForEachMappingTask<K,V> subtasks = null;  // list head of forked splits
            try {
                int b = batch(), c;
                // Split in half while the batch estimate warrants it and
                // elements remain; each fork bumps pending via CAS.
                while (b > 1 && baseIndex != baseLimit) {
                    do {} while (!casPending(c = pending, c+1));
                    (subtasks = new ForEachMappingTask<K,V>
                     (map, this, b >>>= 1, subtasks, action)).fork();
                }
                // advance() returns the value; nextKey holds its key.
                Object v;
                while ((v = advance()) != null)
                    action.apply((K)nextKey, (V)v);
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            tryComplete(subtasks);
            return false;
        }
    }
5185    
    /**
     * Applies the action to each non-null transformation of each key.
     * Null transformer results are skipped (they signal "no mapping").
     */
    @SuppressWarnings("serial") static final class ForEachTransformedKeyTask<K,V,U>
        extends BulkAction<K,V,Void> {
        final Fun<? super K, ? extends U> transformer;
        final Action<U> action;
        ForEachTransformedKeyTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             ForEachTransformedKeyTask<K,V,U> nextTask,
             Fun<? super K, ? extends U> transformer,
             Action<U> action) {
            super(m, p, b, nextTask);
            this.transformer = transformer;
            this.action = action;

        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final Fun<? super K, ? extends U> transformer =
                this.transformer;
            final Action<U> action = this.action;
            if (transformer == null || action == null)
                return abortOnNullFunction();
            ForEachTransformedKeyTask<K,V,U> subtasks = null;  // forked splits
            try {
                int b = batch(), c;
                // Split in half while the batch estimate warrants it and
                // elements remain; each fork bumps pending via CAS.
                while (b > 1 && baseIndex != baseLimit) {
                    do {} while (!casPending(c = pending, c+1));
                    (subtasks = new ForEachTransformedKeyTask<K,V,U>
                     (map, this, b >>>= 1, subtasks, transformer, action)).fork();
                }
                U u;
                while (advance() != null) {
                    if ((u = transformer.apply((K)nextKey)) != null)
                        action.apply(u);
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            tryComplete(subtasks);
            return false;
        }
    }
5226    
    /**
     * Applies the action to each non-null transformation of each value.
     * Null transformer results are skipped (they signal "no mapping").
     */
    @SuppressWarnings("serial") static final class ForEachTransformedValueTask<K,V,U>
        extends BulkAction<K,V,Void> {
        final Fun<? super V, ? extends U> transformer;
        final Action<U> action;
        ForEachTransformedValueTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             ForEachTransformedValueTask<K,V,U> nextTask,
             Fun<? super V, ? extends U> transformer,
             Action<U> action) {
            super(m, p, b, nextTask);
            this.transformer = transformer;
            this.action = action;

        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final Fun<? super V, ? extends U> transformer =
                this.transformer;
            final Action<U> action = this.action;
            if (transformer == null || action == null)
                return abortOnNullFunction();
            ForEachTransformedValueTask<K,V,U> subtasks = null;  // forked splits
            try {
                int b = batch(), c;
                // Split in half while the batch estimate warrants it and
                // elements remain; each fork bumps pending via CAS.
                while (b > 1 && baseIndex != baseLimit) {
                    do {} while (!casPending(c = pending, c+1));
                    (subtasks = new ForEachTransformedValueTask<K,V,U>
                     (map, this, b >>>= 1, subtasks, transformer, action)).fork();
                }
                Object v; U u;
                while ((v = advance()) != null) {
                    if ((u = transformer.apply((V)v)) != null)
                        action.apply(u);
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            tryComplete(subtasks);
            return false;
        }
    }
5267    
    /**
     * Applies the action to each non-null transformation of each entry
     * (exported as a snapshot). Null transformer results are skipped.
     */
    @SuppressWarnings("serial") static final class ForEachTransformedEntryTask<K,V,U>
        extends BulkAction<K,V,Void> {
        final Fun<Map.Entry<K,V>, ? extends U> transformer;
        final Action<U> action;
        ForEachTransformedEntryTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             ForEachTransformedEntryTask<K,V,U> nextTask,
             Fun<Map.Entry<K,V>, ? extends U> transformer,
             Action<U> action) {
            super(m, p, b, nextTask);
            this.transformer = transformer;
            this.action = action;

        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final Fun<Map.Entry<K,V>, ? extends U> transformer =
                this.transformer;
            final Action<U> action = this.action;
            if (transformer == null || action == null)
                return abortOnNullFunction();
            ForEachTransformedEntryTask<K,V,U> subtasks = null;  // forked splits
            try {
                int b = batch(), c;
                // Split in half while the batch estimate warrants it and
                // elements remain; each fork bumps pending via CAS.
                while (b > 1 && baseIndex != baseLimit) {
                    do {} while (!casPending(c = pending, c+1));
                    (subtasks = new ForEachTransformedEntryTask<K,V,U>
                     (map, this, b >>>= 1, subtasks, transformer, action)).fork();
                }
                Object v; U u;
                while ((v = advance()) != null) {
                    if ((u = transformer.apply(entryFor((K)nextKey, (V)v))) != null)
                        action.apply(u);
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            tryComplete(subtasks);
            return false;
        }
    }
5308    
    /**
     * Applies the action to each non-null transformation of each
     * (key, value) pair. Null transformer results are skipped.
     */
    @SuppressWarnings("serial") static final class ForEachTransformedMappingTask<K,V,U>
        extends BulkAction<K,V,Void> {
        final BiFun<? super K, ? super V, ? extends U> transformer;
        final Action<U> action;
        ForEachTransformedMappingTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             ForEachTransformedMappingTask<K,V,U> nextTask,
             BiFun<? super K, ? super V, ? extends U> transformer,
             Action<U> action) {
            super(m, p, b, nextTask);
            this.transformer = transformer;
            this.action = action;

        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final BiFun<? super K, ? super V, ? extends U> transformer =
                this.transformer;
            final Action<U> action = this.action;
            if (transformer == null || action == null)
                return abortOnNullFunction();
            ForEachTransformedMappingTask<K,V,U> subtasks = null;  // forked splits
            try {
                int b = batch(), c;
                // Split in half while the batch estimate warrants it and
                // elements remain; each fork bumps pending via CAS.
                while (b > 1 && baseIndex != baseLimit) {
                    do {} while (!casPending(c = pending, c+1));
                    (subtasks = new ForEachTransformedMappingTask<K,V,U>
                     (map, this, b >>>= 1, subtasks, transformer, action)).fork();
                }
                Object v; U u;
                while ((v = advance()) != null) {
                    if ((u = transformer.apply((K)nextKey, (V)v)) != null)
                        action.apply(u);
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            tryComplete(subtasks);
            return false;
        }
    }
5349    
    /**
     * Searches keys for a non-null result of the search function.
     * The first non-null result wins (via CAS on {@code result});
     * all tasks poll {@code result} and exit early once it is set.
     */
    @SuppressWarnings("serial") static final class SearchKeysTask<K,V,U>
        extends BulkAction<K,V,U> {
        final Fun<? super K, ? extends U> searchFunction;
        final AtomicReference<U> result;  // shared across all subtasks
        SearchKeysTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             SearchKeysTask<K,V,U> nextTask,
             Fun<? super K, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(m, p, b, nextTask);
            this.searchFunction = searchFunction; this.result = result;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            AtomicReference<U> result = this.result;
            final Fun<? super K, ? extends U> searchFunction =
                this.searchFunction;
            if (searchFunction == null || result == null)
                return abortOnNullFunction();
            SearchKeysTask<K,V,U> subtasks = null;  // forked splits
            try {
                int b = batch(), c;
                // Stop splitting as soon as a result has been found.
                while (b > 1 && baseIndex != baseLimit && result.get() == null) {
                    do {} while (!casPending(c = pending, c+1));
                    (subtasks = new SearchKeysTask<K,V,U>
                     (map, this, b >>>= 1, subtasks, searchFunction, result)).fork();
                }
                U u;
                while (result.get() == null && advance() != null) {
                    if ((u = searchFunction.apply((K)nextKey)) != null) {
                        // Winner completes the whole computation immediately.
                        if (result.compareAndSet(null, u))
                            tryCompleteComputation(null);
                        break;
                    }
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            tryComplete(subtasks);
            return false;
        }
        public final U getRawResult() { return result.get(); }
    }
5392    
    /**
     * Searches values for a non-null result of the search function.
     * The first non-null result wins (via CAS on {@code result});
     * all tasks poll {@code result} and exit early once it is set.
     */
    @SuppressWarnings("serial") static final class SearchValuesTask<K,V,U>
        extends BulkAction<K,V,U> {
        final Fun<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;  // shared across all subtasks
        SearchValuesTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             SearchValuesTask<K,V,U> nextTask,
             Fun<? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(m, p, b, nextTask);
            this.searchFunction = searchFunction; this.result = result;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            AtomicReference<U> result = this.result;
            final Fun<? super V, ? extends U> searchFunction =
                this.searchFunction;
            if (searchFunction == null || result == null)
                return abortOnNullFunction();
            SearchValuesTask<K,V,U> subtasks = null;  // forked splits
            try {
                int b = batch(), c;
                // Stop splitting as soon as a result has been found.
                while (b > 1 && baseIndex != baseLimit && result.get() == null) {
                    do {} while (!casPending(c = pending, c+1));
                    (subtasks = new SearchValuesTask<K,V,U>
                     (map, this, b >>>= 1, subtasks, searchFunction, result)).fork();
                }
                Object v; U u;
                while (result.get() == null && (v = advance()) != null) {
                    if ((u = searchFunction.apply((V)v)) != null) {
                        // Winner completes the whole computation immediately.
                        if (result.compareAndSet(null, u))
                            tryCompleteComputation(null);
                        break;
                    }
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            tryComplete(subtasks);
            return false;
        }
        public final U getRawResult() { return result.get(); }
    }
5435 tim 1.11
    /**
     * Searches entries (as snapshots) for a non-null result of the
     * search function. The first non-null result wins (via CAS on
     * {@code result}); all tasks poll {@code result} and exit early.
     */
    @SuppressWarnings("serial") static final class SearchEntriesTask<K,V,U>
        extends BulkAction<K,V,U> {
        final Fun<Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result;  // shared across all subtasks
        SearchEntriesTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             SearchEntriesTask<K,V,U> nextTask,
             Fun<Entry<K,V>, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(m, p, b, nextTask);
            this.searchFunction = searchFunction; this.result = result;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            AtomicReference<U> result = this.result;
            final Fun<Entry<K,V>, ? extends U> searchFunction =
                this.searchFunction;
            if (searchFunction == null || result == null)
                return abortOnNullFunction();
            SearchEntriesTask<K,V,U> subtasks = null;  // forked splits
            try {
                int b = batch(), c;
                // Stop splitting as soon as a result has been found.
                while (b > 1 && baseIndex != baseLimit && result.get() == null) {
                    do {} while (!casPending(c = pending, c+1));
                    (subtasks = new SearchEntriesTask<K,V,U>
                     (map, this, b >>>= 1, subtasks, searchFunction, result)).fork();
                }
                Object v; U u;
                while (result.get() == null && (v = advance()) != null) {
                    if ((u = searchFunction.apply(entryFor((K)nextKey, (V)v))) != null) {
                        // Winner completes the whole computation immediately.
                        if (result.compareAndSet(null, u))
                            tryCompleteComputation(null);
                        break;
                    }
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            tryComplete(subtasks);
            return false;
        }
        public final U getRawResult() { return result.get(); }
    }
5478 tim 1.1
    /**
     * Searches (key, value) pairs for a non-null result of the search
     * function. The first non-null result wins (via CAS on
     * {@code result}); all tasks poll {@code result} and exit early.
     */
    @SuppressWarnings("serial") static final class SearchMappingsTask<K,V,U>
        extends BulkAction<K,V,U> {
        final BiFun<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;  // shared across all subtasks
        SearchMappingsTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             SearchMappingsTask<K,V,U> nextTask,
             BiFun<? super K, ? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(m, p, b, nextTask);
            this.searchFunction = searchFunction; this.result = result;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            AtomicReference<U> result = this.result;
            final BiFun<? super K, ? super V, ? extends U> searchFunction =
                this.searchFunction;
            if (searchFunction == null || result == null)
                return abortOnNullFunction();
            SearchMappingsTask<K,V,U> subtasks = null;  // forked splits
            try {
                int b = batch(), c;
                // Stop splitting as soon as a result has been found.
                while (b > 1 && baseIndex != baseLimit && result.get() == null) {
                    do {} while (!casPending(c = pending, c+1));
                    (subtasks = new SearchMappingsTask<K,V,U>
                     (map, this, b >>>= 1, subtasks, searchFunction, result)).fork();
                }
                Object v; U u;
                while (result.get() == null && (v = advance()) != null) {
                    if ((u = searchFunction.apply((K)nextKey, (V)v)) != null) {
                        // Winner completes the whole computation immediately.
                        if (result.compareAndSet(null, u))
                            tryCompleteComputation(null);
                        break;
                    }
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            tryComplete(subtasks);
            return false;
        }
        public final U getRawResult() { return result.get(); }
    }
5521 tim 1.1
    /**
     * Reduces all keys using the given reducer; null elements are
     * skipped as identity. Right-hand subtasks are chained through
     * {@code rights}/{@code nextRight} and merged bottom-up when a
     * task's pending count reaches zero.
     */
    @SuppressWarnings("serial") static final class ReduceKeysTask<K,V>
        extends BulkTask<K,V,K> {
        final BiFun<? super K, ? super K, ? extends K> reducer;
        K result;  // partial reduction for this subtree
        ReduceKeysTask<K,V> rights, nextRight;  // chain of forked right splits
        ReduceKeysTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             ReduceKeysTask<K,V> nextRight,
             BiFun<? super K, ? super K, ? extends K> reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final BiFun<? super K, ? super K, ? extends K> reducer =
                this.reducer;
            if (reducer == null)
                return abortOnNullFunction();
            try {
                // Split in half while warranted; each fork bumps pending.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new ReduceKeysTask<K,V>
                     (map, this, b >>>= 1, rights, reducer)).fork();
                }
                // Leaf reduction over this task's remaining range.
                K r = null;
                while (advance() != null) {
                    K u = (K)nextKey;
                    r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Bottom-up completion: when pending hits zero, fold in
                // completed right-subtask results and continue upward.
                for (ReduceKeysTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par; K tr, sr;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            if ((sr = s.result) != null)
                                t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof ReduceKeysTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (ReduceKeysTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;  // a sibling is still running; it will continue
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            // External (non-FJ) caller: help run unfinished right splits.
            ReduceKeysTask<K,V> s = rights;
            if (s != null && !inForkJoinPool()) {
                do {
                    if (s.tryUnfork())
                        s.exec();
                } while ((s = s.nextRight) != null);
            }
            return false;
        }
        public final K getRawResult() { return result; }
    }
5582 tim 1.1
    /**
     * Reduces all values using the given reducer; null elements are
     * skipped as identity. Right-hand subtasks are chained through
     * {@code rights}/{@code nextRight} and merged bottom-up when a
     * task's pending count reaches zero.
     */
    @SuppressWarnings("serial") static final class ReduceValuesTask<K,V>
        extends BulkTask<K,V,V> {
        final BiFun<? super V, ? super V, ? extends V> reducer;
        V result;  // partial reduction for this subtree
        ReduceValuesTask<K,V> rights, nextRight;  // chain of forked right splits
        ReduceValuesTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             ReduceValuesTask<K,V> nextRight,
             BiFun<? super V, ? super V, ? extends V> reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final BiFun<? super V, ? super V, ? extends V> reducer =
                this.reducer;
            if (reducer == null)
                return abortOnNullFunction();
            try {
                // Split in half while warranted; each fork bumps pending.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new ReduceValuesTask<K,V>
                     (map, this, b >>>= 1, rights, reducer)).fork();
                }
                // Leaf reduction over this task's remaining range.
                V r = null;
                Object v;
                while ((v = advance()) != null) {
                    V u = (V)v;
                    r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Bottom-up completion: when pending hits zero, fold in
                // completed right-subtask results and continue upward.
                for (ReduceValuesTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par; V tr, sr;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            if ((sr = s.result) != null)
                                t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof ReduceValuesTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (ReduceValuesTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;  // a sibling is still running; it will continue
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            // External (non-FJ) caller: help run unfinished right splits.
            ReduceValuesTask<K,V> s = rights;
            if (s != null && !inForkJoinPool()) {
                do {
                    if (s.tryUnfork())
                        s.exec();
                } while ((s = s.nextRight) != null);
            }
            return false;
        }
        public final V getRawResult() { return result; }
    }
5644 tim 1.1
    /**
     * Reduces all entries (as snapshots) using the given reducer;
     * null elements are skipped as identity. Right-hand subtasks are
     * chained through {@code rights}/{@code nextRight} and merged
     * bottom-up when a task's pending count reaches zero.
     */
    @SuppressWarnings("serial") static final class ReduceEntriesTask<K,V>
        extends BulkTask<K,V,Map.Entry<K,V>> {
        final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
        Map.Entry<K,V> result;  // partial reduction for this subtree
        ReduceEntriesTask<K,V> rights, nextRight;  // chain of forked right splits
        ReduceEntriesTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             ReduceEntriesTask<K,V> nextRight,
             BiFun<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer =
                this.reducer;
            if (reducer == null)
                return abortOnNullFunction();
            try {
                // Split in half while warranted; each fork bumps pending.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new ReduceEntriesTask<K,V>
                     (map, this, b >>>= 1, rights, reducer)).fork();
                }
                // Leaf reduction over this task's remaining range; each
                // element is exported as a detached snapshot entry.
                Map.Entry<K,V> r = null;
                Object v;
                while ((v = advance()) != null) {
                    Map.Entry<K,V> u = entryFor((K)nextKey, (V)v);
                    r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Bottom-up completion: when pending hits zero, fold in
                // completed right-subtask results and continue upward.
                for (ReduceEntriesTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par; Map.Entry<K,V> tr, sr;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            if ((sr = s.result) != null)
                                t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof ReduceEntriesTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (ReduceEntriesTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;  // a sibling is still running; it will continue
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            // External (non-FJ) caller: help run unfinished right splits.
            ReduceEntriesTask<K,V> s = rights;
            if (s != null && !inForkJoinPool()) {
                do {
                    if (s.tryUnfork())
                        s.exec();
                } while ((s = s.nextRight) != null);
            }
            return false;
        }
        public final Map.Entry<K,V> getRawResult() { return result; }
    }
5706 dl 1.99
    /**
     * Task mapping each key through a transformer and reducing the non-null
     * results with the given reducer.  Forking/completion protocol is shared
     * with the other bulk tasks in this file; traversal state comes from
     * BulkTask (declared elsewhere in this file).
     */
    @SuppressWarnings("serial") static final class MapReduceKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super K, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;                      // reduction of this task's split; null if nothing mapped
        MapReduceKeysTask<K,V,U> rights, nextRight;  // linked stack of forked right subtasks
        MapReduceKeysTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceKeysTask<K,V,U> nextRight,
             Fun<? super K, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final Fun<? super K, ? extends U> transformer =
                this.transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer =
                this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                // Split while profitable: CAS-bump pending, fork right half.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceKeysTask<K,V,U>
                     (map, this, b >>>= 1, rights, transformer, reducer)).fork();
                }
                // Map+reduce this split's keys, skipping null transformations.
                U r = null, u;
                while (advance() != null) {
                    if ((u = transformer.apply((K)nextKey)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Completion: fold finished rights' non-null results upward
                // through same-typed parents; else CAS-decrement pending.
                for (MapReduceKeysTask<K,V,U> t = this, s;;) {
                    int c; BulkTask<K,V,?> par; U tr, sr;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            if ((sr = s.result) != null)
                                t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceKeysTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceKeysTask<K,V,U>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            // Outside a ForkJoinPool, run unstolen right subtasks directly.
            MapReduceKeysTask<K,V,U> s = rights;
            if (s != null && !inForkJoinPool()) {
                do {
                    if (s.tryUnfork())
                        s.exec();
                } while ((s = s.nextRight) != null);
            }
            return false;
        }
        public final U getRawResult() { return result; }
    }
5772    
    /**
     * Task mapping each value through a transformer and reducing the
     * non-null results with the given reducer.  Same fork/split and
     * completion protocol as the other bulk tasks in this file.
     */
    @SuppressWarnings("serial") static final class MapReduceValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super V, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;                      // reduction of this task's split; null if nothing mapped
        MapReduceValuesTask<K,V,U> rights, nextRight;  // linked stack of forked right subtasks
        MapReduceValuesTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceValuesTask<K,V,U> nextRight,
             Fun<? super V, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final Fun<? super V, ? extends U> transformer =
                this.transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer =
                this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                // Split while profitable: CAS-bump pending, fork right half.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceValuesTask<K,V,U>
                     (map, this, b >>>= 1, rights, transformer, reducer)).fork();
                }
                // Map+reduce this split's values, skipping null transformations.
                U r = null, u;
                Object v;
                while ((v = advance()) != null) {
                    if ((u = transformer.apply((V)v)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Completion: fold finished rights' non-null results upward
                // through same-typed parents; else CAS-decrement pending.
                for (MapReduceValuesTask<K,V,U> t = this, s;;) {
                    int c; BulkTask<K,V,?> par; U tr, sr;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            if ((sr = s.result) != null)
                                t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceValuesTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceValuesTask<K,V,U>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            // Outside a ForkJoinPool, run unstolen right subtasks directly.
            MapReduceValuesTask<K,V,U> s = rights;
            if (s != null && !inForkJoinPool()) {
                do {
                    if (s.tryUnfork())
                        s.exec();
                } while ((s = s.nextRight) != null);
            }
            return false;
        }
        public final U getRawResult() { return result; }
    }
5839    
    /**
     * Task mapping each entry through a transformer and reducing the
     * non-null results with the given reducer.  Same fork/split and
     * completion protocol as the other bulk tasks in this file.
     */
    @SuppressWarnings("serial") static final class MapReduceEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<Map.Entry<K,V>, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;                      // reduction of this task's split; null if nothing mapped
        MapReduceEntriesTask<K,V,U> rights, nextRight;  // linked stack of forked right subtasks
        MapReduceEntriesTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceEntriesTask<K,V,U> nextRight,
             Fun<Map.Entry<K,V>, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final Fun<Map.Entry<K,V>, ? extends U> transformer =
                this.transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer =
                this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                // Split while profitable: CAS-bump pending, fork right half.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceEntriesTask<K,V,U>
                     (map, this, b >>>= 1, rights, transformer, reducer)).fork();
                }
                // Map+reduce this split's entries (built via entryFor),
                // skipping null transformations.
                U r = null, u;
                Object v;
                while ((v = advance()) != null) {
                    if ((u = transformer.apply(entryFor((K)nextKey, (V)v))) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Completion: fold finished rights' non-null results upward
                // through same-typed parents; else CAS-decrement pending.
                for (MapReduceEntriesTask<K,V,U> t = this, s;;) {
                    int c; BulkTask<K,V,?> par; U tr, sr;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            if ((sr = s.result) != null)
                                t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceEntriesTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceEntriesTask<K,V,U>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            // Outside a ForkJoinPool, run unstolen right subtasks directly.
            MapReduceEntriesTask<K,V,U> s = rights;
            if (s != null && !inForkJoinPool()) {
                do {
                    if (s.tryUnfork())
                        s.exec();
                } while ((s = s.nextRight) != null);
            }
            return false;
        }
        public final U getRawResult() { return result; }
    }
5906 tim 1.1
    /**
     * Task mapping each (key, value) pair through a BiFun transformer and
     * reducing the non-null results with the given reducer.  Same fork/split
     * and completion protocol as the other bulk tasks in this file.
     */
    @SuppressWarnings("serial") static final class MapReduceMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFun<? super K, ? super V, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;                      // reduction of this task's split; null if nothing mapped
        MapReduceMappingsTask<K,V,U> rights, nextRight;  // linked stack of forked right subtasks
        MapReduceMappingsTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceMappingsTask<K,V,U> nextRight,
             BiFun<? super K, ? super V, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final BiFun<? super K, ? super V, ? extends U> transformer =
                this.transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer =
                this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                // Split while profitable: CAS-bump pending, fork right half.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceMappingsTask<K,V,U>
                     (map, this, b >>>= 1, rights, transformer, reducer)).fork();
                }
                // Map+reduce this split's mappings, skipping null transformations.
                U r = null, u;
                Object v;
                while ((v = advance()) != null) {
                    if ((u = transformer.apply((K)nextKey, (V)v)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Completion: fold finished rights' non-null results upward
                // through same-typed parents; else CAS-decrement pending.
                for (MapReduceMappingsTask<K,V,U> t = this, s;;) {
                    int c; BulkTask<K,V,?> par; U tr, sr;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            if ((sr = s.result) != null)
                                t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceMappingsTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceMappingsTask<K,V,U>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            // Outside a ForkJoinPool, run unstolen right subtasks directly.
            MapReduceMappingsTask<K,V,U> s = rights;
            if (s != null && !inForkJoinPool()) {
                do {
                    if (s.tryUnfork())
                        s.exec();
                } while ((s = s.nextRight) != null);
            }
            return false;
        }
        public final U getRawResult() { return result; }
    }
5973 jsr166 1.114
    /**
     * Task mapping each key to a double and reducing with the given
     * reducer, starting from the identity value {@code basis}.  Unlike the
     * object-result tasks, subtask results are folded in unconditionally
     * (no null screen -- doubles cannot be null).
     */
    @SuppressWarnings("serial") static final class MapReduceKeysToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<? super K> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;            // identity element for the reduction
        double result;                 // reduction of this task's split
        MapReduceKeysToDoubleTask<K,V> rights, nextRight;  // linked stack of forked right subtasks
        MapReduceKeysToDoubleTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceKeysToDoubleTask<K,V> nextRight,
             ObjectToDouble<? super K> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectToDouble<? super K> transformer =
                this.transformer;
            final DoubleByDoubleToDouble reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final double id = this.basis;
                // Split while profitable: CAS-bump pending, fork right half.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceKeysToDoubleTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce this split's keys, starting from the identity.
                double r = id;
                while (advance() != null)
                    r = reducer.apply(r, transformer.apply((K)nextKey));
                result = r;
                // Completion: fold finished rights' results upward through
                // same-typed parents; else CAS-decrement pending.
                for (MapReduceKeysToDoubleTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceKeysToDoubleTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceKeysToDoubleTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            // Outside a ForkJoinPool, run unstolen right subtasks directly.
            MapReduceKeysToDoubleTask<K,V> s = rights;
            if (s != null && !inForkJoinPool()) {
                do {
                    if (s.tryUnfork())
                        s.exec();
                } while ((s = s.nextRight) != null);
            }
            return false;
        }
        public final Double getRawResult() { return result; }
    }
6038 dl 1.79
    /**
     * Task mapping each value to a double and reducing with the given
     * reducer, starting from the identity value {@code basis}.  Subtask
     * results are folded in unconditionally (doubles cannot be null).
     */
    @SuppressWarnings("serial") static final class MapReduceValuesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<? super V> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;            // identity element for the reduction
        double result;                 // reduction of this task's split
        MapReduceValuesToDoubleTask<K,V> rights, nextRight;  // linked stack of forked right subtasks
        MapReduceValuesToDoubleTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceValuesToDoubleTask<K,V> nextRight,
             ObjectToDouble<? super V> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectToDouble<? super V> transformer =
                this.transformer;
            final DoubleByDoubleToDouble reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final double id = this.basis;
                // Split while profitable: CAS-bump pending, fork right half.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceValuesToDoubleTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce this split's values, starting from the identity.
                double r = id;
                Object v;
                while ((v = advance()) != null)
                    r = reducer.apply(r, transformer.apply((V)v));
                result = r;
                // Completion: fold finished rights' results upward through
                // same-typed parents; else CAS-decrement pending.
                for (MapReduceValuesToDoubleTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceValuesToDoubleTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceValuesToDoubleTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            // Outside a ForkJoinPool, run unstolen right subtasks directly.
            MapReduceValuesToDoubleTask<K,V> s = rights;
            if (s != null && !inForkJoinPool()) {
                do {
                    if (s.tryUnfork())
                        s.exec();
                } while ((s = s.nextRight) != null);
            }
            return false;
        }
        public final Double getRawResult() { return result; }
    }
6104 dl 1.30
    /**
     * Task mapping each entry to a double and reducing with the given
     * reducer, starting from the identity value {@code basis}.  Subtask
     * results are folded in unconditionally (doubles cannot be null).
     */
    @SuppressWarnings("serial") static final class MapReduceEntriesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<Map.Entry<K,V>> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;            // identity element for the reduction
        double result;                 // reduction of this task's split
        MapReduceEntriesToDoubleTask<K,V> rights, nextRight;  // linked stack of forked right subtasks
        MapReduceEntriesToDoubleTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceEntriesToDoubleTask<K,V> nextRight,
             ObjectToDouble<Map.Entry<K,V>> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectToDouble<Map.Entry<K,V>> transformer =
                this.transformer;
            final DoubleByDoubleToDouble reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final double id = this.basis;
                // Split while profitable: CAS-bump pending, fork right half.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceEntriesToDoubleTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce this split's entries (built via entryFor),
                // starting from the identity.
                double r = id;
                Object v;
                while ((v = advance()) != null)
                    r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
                result = r;
                // Completion: fold finished rights' results upward through
                // same-typed parents; else CAS-decrement pending.
                for (MapReduceEntriesToDoubleTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceEntriesToDoubleTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceEntriesToDoubleTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            // Outside a ForkJoinPool, run unstolen right subtasks directly.
            MapReduceEntriesToDoubleTask<K,V> s = rights;
            if (s != null && !inForkJoinPool()) {
                do {
                    if (s.tryUnfork())
                        s.exec();
                } while ((s = s.nextRight) != null);
            }
            return false;
        }
        public final Double getRawResult() { return result; }
    }
6170    
    /**
     * Task mapping each (key, value) pair to a double and reducing with the
     * given reducer, starting from the identity value {@code basis}.
     * Subtask results are folded in unconditionally (doubles cannot be null).
     */
    @SuppressWarnings("serial") static final class MapReduceMappingsToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectByObjectToDouble<? super K, ? super V> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;            // identity element for the reduction
        double result;                 // reduction of this task's split
        MapReduceMappingsToDoubleTask<K,V> rights, nextRight;  // linked stack of forked right subtasks
        MapReduceMappingsToDoubleTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceMappingsToDoubleTask<K,V> nextRight,
             ObjectByObjectToDouble<? super K, ? super V> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectByObjectToDouble<? super K, ? super V> transformer =
                this.transformer;
            final DoubleByDoubleToDouble reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final double id = this.basis;
                // Split while profitable: CAS-bump pending, fork right half.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceMappingsToDoubleTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce this split's mappings, starting from the identity.
                double r = id;
                Object v;
                while ((v = advance()) != null)
                    r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
                result = r;
                // Completion: fold finished rights' results upward through
                // same-typed parents; else CAS-decrement pending.
                for (MapReduceMappingsToDoubleTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceMappingsToDoubleTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceMappingsToDoubleTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            // Outside a ForkJoinPool, run unstolen right subtasks directly.
            MapReduceMappingsToDoubleTask<K,V> s = rights;
            if (s != null && !inForkJoinPool()) {
                do {
                    if (s.tryUnfork())
                        s.exec();
                } while ((s = s.nextRight) != null);
            }
            return false;
        }
        public final Double getRawResult() { return result; }
    }
6236    
    /**
     * Task mapping each key to a long and reducing with the given reducer,
     * starting from the identity value {@code basis}.  Subtask results are
     * folded in unconditionally (longs cannot be null).
     */
    @SuppressWarnings("serial") static final class MapReduceKeysToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<? super K> transformer;
        final LongByLongToLong reducer;
        final long basis;              // identity element for the reduction
        long result;                   // reduction of this task's split
        MapReduceKeysToLongTask<K,V> rights, nextRight;  // linked stack of forked right subtasks
        MapReduceKeysToLongTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceKeysToLongTask<K,V> nextRight,
             ObjectToLong<? super K> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectToLong<? super K> transformer =
                this.transformer;
            final LongByLongToLong reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final long id = this.basis;
                // Split while profitable: CAS-bump pending, fork right half.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceKeysToLongTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce this split's keys, starting from the identity.
                long r = id;
                while (advance() != null)
                    r = reducer.apply(r, transformer.apply((K)nextKey));
                result = r;
                // Completion: fold finished rights' results upward through
                // same-typed parents; else CAS-decrement pending.
                for (MapReduceKeysToLongTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceKeysToLongTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceKeysToLongTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            // Outside a ForkJoinPool, run unstolen right subtasks directly.
            MapReduceKeysToLongTask<K,V> s = rights;
            if (s != null && !inForkJoinPool()) {
                do {
                    if (s.tryUnfork())
                        s.exec();
                } while ((s = s.nextRight) != null);
            }
            return false;
        }
        public final Long getRawResult() { return result; }
    }
6301    
    /**
     * Task mapping each value to a long and reducing with the given reducer,
     * starting from the identity value {@code basis}.  Subtask results are
     * folded in unconditionally (longs cannot be null).
     */
    @SuppressWarnings("serial") static final class MapReduceValuesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<? super V> transformer;
        final LongByLongToLong reducer;
        final long basis;              // identity element for the reduction
        long result;                   // reduction of this task's split
        MapReduceValuesToLongTask<K,V> rights, nextRight;  // linked stack of forked right subtasks
        MapReduceValuesToLongTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceValuesToLongTask<K,V> nextRight,
             ObjectToLong<? super V> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectToLong<? super V> transformer =
                this.transformer;
            final LongByLongToLong reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final long id = this.basis;
                // Split while profitable: CAS-bump pending, fork right half.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceValuesToLongTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce this split's values, starting from the identity.
                long r = id;
                Object v;
                while ((v = advance()) != null)
                    r = reducer.apply(r, transformer.apply((V)v));
                result = r;
                // Completion: fold finished rights' results upward through
                // same-typed parents; else CAS-decrement pending.
                for (MapReduceValuesToLongTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceValuesToLongTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceValuesToLongTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            // Outside a ForkJoinPool, run unstolen right subtasks directly.
            MapReduceValuesToLongTask<K,V> s = rights;
            if (s != null && !inForkJoinPool()) {
                do {
                    if (s.tryUnfork())
                        s.exec();
                } while ((s = s.nextRight) != null);
            }
            return false;
        }
        public final Long getRawResult() { return result; }
    }
6367    
    /**
     * Task mapping each entry to a long and reducing with the given reducer,
     * starting from the identity value {@code basis}.  Subtask results are
     * folded in unconditionally (longs cannot be null).
     */
    @SuppressWarnings("serial") static final class MapReduceEntriesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<Map.Entry<K,V>> transformer;
        final LongByLongToLong reducer;
        final long basis;              // identity element for the reduction
        long result;                   // reduction of this task's split
        MapReduceEntriesToLongTask<K,V> rights, nextRight;  // linked stack of forked right subtasks
        MapReduceEntriesToLongTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceEntriesToLongTask<K,V> nextRight,
             ObjectToLong<Map.Entry<K,V>> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectToLong<Map.Entry<K,V>> transformer =
                this.transformer;
            final LongByLongToLong reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final long id = this.basis;
                // Split while profitable: CAS-bump pending, fork right half.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceEntriesToLongTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce this split's entries (built via entryFor),
                // starting from the identity.
                long r = id;
                Object v;
                while ((v = advance()) != null)
                    r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
                result = r;
                // Completion: fold finished rights' results upward through
                // same-typed parents; else CAS-decrement pending.
                for (MapReduceEntriesToLongTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceEntriesToLongTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceEntriesToLongTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            // Outside a ForkJoinPool, run unstolen right subtasks directly.
            MapReduceEntriesToLongTask<K,V> s = rights;
            if (s != null && !inForkJoinPool()) {
                do {
                    if (s.tryUnfork())
                        s.exec();
                } while ((s = s.nextRight) != null);
            }
            return false;
        }
        public final Long getRawResult() { return result; }
    }
6433    
    /**
     * Task mapping each (key, value) pair to a long and reducing with the
     * given reducer, starting from the identity value {@code basis}.
     * Subtask results are folded in unconditionally (longs cannot be null).
     */
    @SuppressWarnings("serial") static final class MapReduceMappingsToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectByObjectToLong<? super K, ? super V> transformer;
        final LongByLongToLong reducer;
        final long basis;              // identity element for the reduction
        long result;                   // reduction of this task's split
        MapReduceMappingsToLongTask<K,V> rights, nextRight;  // linked stack of forked right subtasks
        MapReduceMappingsToLongTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceMappingsToLongTask<K,V> nextRight,
             ObjectByObjectToLong<? super K, ? super V> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectByObjectToLong<? super K, ? super V> transformer =
                this.transformer;
            final LongByLongToLong reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final long id = this.basis;
                // Split while profitable: CAS-bump pending, fork right half.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceMappingsToLongTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce this split's mappings, starting from the identity.
                long r = id;
                Object v;
                while ((v = advance()) != null)
                    r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
                result = r;
                // Completion: fold finished rights' results upward through
                // same-typed parents; else CAS-decrement pending.
                for (MapReduceMappingsToLongTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceMappingsToLongTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceMappingsToLongTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            // Outside a ForkJoinPool, run unstolen right subtasks directly.
            MapReduceMappingsToLongTask<K,V> s = rights;
            if (s != null && !inForkJoinPool()) {
                do {
                    if (s.tryUnfork())
                        s.exec();
                } while ((s = s.nextRight) != null);
            }
            return false;
        }
        public final Long getRawResult() { return result; }
    }
6499    
    /**
     * Fork/join task that maps each key to an {@code int} via
     * {@code transformer} and folds the results together with
     * {@code reducer}, starting from the identity value {@code basis}.
     */
    @SuppressWarnings("serial") static final class MapReduceKeysToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<? super K> transformer;
        final IntByIntToInt reducer;      // combines two partial results
        final int basis;                  // identity element of the reduction
        int result;                       // reduced value for this task's range
        // Forked right-hand subtasks of this task, linked through nextRight.
        MapReduceKeysToIntTask<K,V> rights, nextRight;
        MapReduceKeysToIntTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceKeysToIntTask<K,V> nextRight,
             ObjectToInt<? super K> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectToInt<? super K> transformer =
                this.transformer;
            final IntByIntToInt reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();   // null functions abort the computation
            try {
                final int id = this.basis;
                // Split off and fork right-hand halves while the batch size
                // allows; each fork first CAS-increments the pending count.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceKeysToIntTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce over the keys remaining in this task's own range
                // (values are ignored, so advance()'s result is unused).
                int r = id;
                while (advance() != null)
                    r = reducer.apply(r, transformer.apply((K)nextKey));
                result = r;
                // Completion pass: when a task's pending count hits zero,
                // merge the results of its completed right subtasks and move
                // up to the parent; otherwise just decrement the ancestor's
                // pending count and stop.
                for (MapReduceKeysToIntTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceKeysToIntTask)) {
                            t.quietlyComplete();   // root (or foreign parent): done
                            break;
                        }
                        t = (MapReduceKeysToIntTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            // When not running inside a ForkJoinPool, reclaim (tryUnfork)
            // still-queued right subtasks and execute them in this thread.
            MapReduceKeysToIntTask<K,V> s = rights;
            if (s != null && !inForkJoinPool()) {
                do {
                    if (s.tryUnfork())
                        s.exec();
                } while ((s = s.nextRight) != null);
            }
            return false;   // completion is signaled via quietlyComplete, not exec's return
        }
        public final Integer getRawResult() { return result; }
    }
6564    
    /**
     * Fork/join task that maps each value to an {@code int} via
     * {@code transformer} and folds the results together with
     * {@code reducer}, starting from the identity value {@code basis}.
     */
    @SuppressWarnings("serial") static final class MapReduceValuesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<? super V> transformer;
        final IntByIntToInt reducer;      // combines two partial results
        final int basis;                  // identity element of the reduction
        int result;                       // reduced value for this task's range
        // Forked right-hand subtasks of this task, linked through nextRight.
        MapReduceValuesToIntTask<K,V> rights, nextRight;
        MapReduceValuesToIntTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceValuesToIntTask<K,V> nextRight,
             ObjectToInt<? super V> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectToInt<? super V> transformer =
                this.transformer;
            final IntByIntToInt reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();   // null functions abort the computation
            try {
                final int id = this.basis;
                // Split off and fork right-hand halves while the batch size
                // allows; each fork first CAS-increments the pending count.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceValuesToIntTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce over the values remaining in this task's own range.
                int r = id;
                Object v;
                while ((v = advance()) != null)
                    r = reducer.apply(r, transformer.apply((V)v));
                result = r;
                // Completion pass: when a task's pending count hits zero,
                // merge the results of its completed right subtasks and move
                // up to the parent; otherwise just decrement the ancestor's
                // pending count and stop.
                for (MapReduceValuesToIntTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceValuesToIntTask)) {
                            t.quietlyComplete();   // root (or foreign parent): done
                            break;
                        }
                        t = (MapReduceValuesToIntTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            // When not running inside a ForkJoinPool, reclaim (tryUnfork)
            // still-queued right subtasks and execute them in this thread.
            MapReduceValuesToIntTask<K,V> s = rights;
            if (s != null && !inForkJoinPool()) {
                do {
                    if (s.tryUnfork())
                        s.exec();
                } while ((s = s.nextRight) != null);
            }
            return false;   // completion is signaled via quietlyComplete, not exec's return
        }
        public final Integer getRawResult() { return result; }
    }
6630    
    /**
     * Fork/join task that maps each map entry to an {@code int} via
     * {@code transformer} and folds the results together with
     * {@code reducer}, starting from the identity value {@code basis}.
     */
    @SuppressWarnings("serial") static final class MapReduceEntriesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<Map.Entry<K,V>> transformer;
        final IntByIntToInt reducer;      // combines two partial results
        final int basis;                  // identity element of the reduction
        int result;                       // reduced value for this task's range
        // Forked right-hand subtasks of this task, linked through nextRight.
        MapReduceEntriesToIntTask<K,V> rights, nextRight;
        MapReduceEntriesToIntTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceEntriesToIntTask<K,V> nextRight,
             ObjectToInt<Map.Entry<K,V>> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectToInt<Map.Entry<K,V>> transformer =
                this.transformer;
            final IntByIntToInt reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();   // null functions abort the computation
            try {
                final int id = this.basis;
                // Split off and fork right-hand halves while the batch size
                // allows; each fork first CAS-increments the pending count.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceEntriesToIntTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce over the entries remaining in this task's own range,
                // materializing each (key, value) pair via entryFor.
                int r = id;
                Object v;
                while ((v = advance()) != null)
                    r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
                result = r;
                // Completion pass: when a task's pending count hits zero,
                // merge the results of its completed right subtasks and move
                // up to the parent; otherwise just decrement the ancestor's
                // pending count and stop.
                for (MapReduceEntriesToIntTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceEntriesToIntTask)) {
                            t.quietlyComplete();   // root (or foreign parent): done
                            break;
                        }
                        t = (MapReduceEntriesToIntTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            // When not running inside a ForkJoinPool, reclaim (tryUnfork)
            // still-queued right subtasks and execute them in this thread.
            MapReduceEntriesToIntTask<K,V> s = rights;
            if (s != null && !inForkJoinPool()) {
                do {
                    if (s.tryUnfork())
                        s.exec();
                } while ((s = s.nextRight) != null);
            }
            return false;   // completion is signaled via quietlyComplete, not exec's return
        }
        public final Integer getRawResult() { return result; }
    }
6696 tim 1.1
6697 dl 1.128 @SuppressWarnings("serial") static final class MapReduceMappingsToIntTask<K,V>
6698 dl 1.119 extends BulkTask<K,V,Integer> {
6699     final ObjectByObjectToInt<? super K, ? super V> transformer;
6700     final IntByIntToInt reducer;
6701     final int basis;
6702     int result;
6703 dl 1.128 MapReduceMappingsToIntTask<K,V> rights, nextRight;
6704 dl 1.119 MapReduceMappingsToIntTask
6705 dl 1.130 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
6706     MapReduceMappingsToIntTask<K,V> rights,
6707 dl 1.119 ObjectByObjectToInt<? super K, ? super V> transformer,
6708     int basis,
6709     IntByIntToInt reducer) {
6710 dl 1.130 super(m, p, b); this.nextRight = nextRight;
6711 dl 1.119 this.transformer = transformer;
6712     this.basis = basis; this.reducer = reducer;
6713     }
6714 dl 1.128 @SuppressWarnings("unchecked") public final boolean exec() {
6715 dl 1.119 final ObjectByObjectToInt<? super K, ? super V> transformer =
6716     this.transformer;
6717     final IntByIntToInt reducer = this.reducer;
6718     if (transformer == null || reducer == null)
6719 dl 1.128 return abortOnNullFunction();
6720     try {
6721     final int id = this.basis;
6722     for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6723     do {} while (!casPending(c = pending, c+1));
6724     (rights = new MapReduceMappingsToIntTask<K,V>
6725 dl 1.130 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6726 dl 1.128 }
6727     int r = id;
6728     Object v;
6729     while ((v = advance()) != null)
6730     r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6731     result = r;
6732     for (MapReduceMappingsToIntTask<K,V> t = this, s;;) {
6733     int c; BulkTask<K,V,?> par;
6734     if ((c = t.pending) == 0) {
6735     for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6736     t.result = reducer.apply(t.result, s.result);
6737     }
6738     if ((par = t.parent) == null ||
6739     !(par instanceof MapReduceMappingsToIntTask)) {
6740     t.quietlyComplete();
6741     break;
6742     }
6743     t = (MapReduceMappingsToIntTask<K,V>)par;
6744     }
6745     else if (t.casPending(c, c - 1))
6746     break;
6747 dl 1.119 }
6748 dl 1.128 } catch (Throwable ex) {
6749     return tryCompleteComputation(ex);
6750 dl 1.119 }
6751 dl 1.138 MapReduceMappingsToIntTask<K,V> s = rights;
6752     if (s != null && !inForkJoinPool()) {
6753     do {
6754     if (s.tryUnfork())
6755     s.exec();
6756     } while ((s = s.nextRight) != null);
6757     }
6758 dl 1.128 return false;
6759 tim 1.1 }
6760 dl 1.119 public final Integer getRawResult() { return result; }
6761 tim 1.1 }
6762 dl 1.99
    // Unsafe mechanics: raw field/array offsets used for the CAS and
    // volatile-array accesses elsewhere in this class.
    private static final sun.misc.Unsafe UNSAFE;
    private static final long counterOffset;  // offset of the "counter" field
    private static final long sizeCtlOffset;  // offset of the "sizeCtl" field
    private static final long ABASE;          // base offset of Node[] storage
    private static final int ASHIFT;          // log2 of the Node[] element scale

    static {
        int ss;
        try {
            UNSAFE = sun.misc.Unsafe.getUnsafe();
            Class<?> k = ConcurrentHashMap.class;
            counterOffset = UNSAFE.objectFieldOffset
                (k.getDeclaredField("counter"));
            sizeCtlOffset = UNSAFE.objectFieldOffset
                (k.getDeclaredField("sizeCtl"));
            Class<?> sc = Node[].class;
            ABASE = UNSAFE.arrayBaseOffset(sc);
            ss = UNSAFE.arrayIndexScale(sc);
        } catch (Exception e) {
            // Reflection/Unsafe failure here is unrecoverable: the class
            // cannot function without these offsets.
            throw new Error(e);
        }
        // The shift-based array addressing below only works when the
        // element scale is a power of two.
        if ((ss & (ss-1)) != 0)
            throw new Error("data type scale not a power of two");
        ASHIFT = 31 - Integer.numberOfLeadingZeros(ss);
    }
6789 dl 1.128 }